diff --git a/.gitmodules b/.gitmodules index a012324010..a773677e62 100644 --- a/.gitmodules +++ b/.gitmodules @@ -14,3 +14,7 @@ path = upp url = https://github.com/NOAA-EMC/UPP branch = develop +[submodule "mpas/MPAS-Model"] + path = mpas/MPAS-Model + url = https://github.com/ufs-community/MPAS-Model.git + branch = feature/mpas-in-ufs diff --git a/CMakeLists.txt b/CMakeLists.txt index 857a8b50d3..ceee4e5022 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,10 +18,14 @@ if(BUILD_CI_TESTING) if (FV3) project(ufsatm_fv3 VERSION 1.0 LANGUAGES C CXX Fortran) endif() + if (MPAS) + project(ufsatm_mpas VERSION 1.0 LANGUAGES C CXX Fortran) + endif() include(ci/CMakeLists.txt) endif() # Set variables for all dycore build options in UFSATM. +set(DYCORE_TARGET_MPAS ufsatm_mpas) set(DYCORE_TARGET_FV3 ufsatm_fv3) ############################################################################### @@ -56,14 +60,38 @@ if(BUILD_CI_TESTING) add_subdirectory(tests) endif() +# Not used yet by MPAS in UFSATM, but needed by ufsatm_cap.F90 to work. +list(APPEND coupling_srcs + cpl/module_block_data.F90 + cpl/module_cplfields.F90 + cpl/module_cap_cpl.F90 + cpl/module_cplscalars.F90) + +list(APPEND io_srcs + io/module_write_netcdf.F90 + io/module_write_restart_netcdf.F90 + io/module_fv3_io_def.F90 + io/module_write_internal_state.F90 + io/module_wrt_grid_comp.F90) + +# Eventually these could be shared by MPAS, and merged with {io_srcs} list. 
+list(APPEND fv3_io_srcs + io/fv3atm_common_io.F90 + io/fv3atm_clm_lake_io.F90 + io/fv3atm_rrfs_sd_io.F90 + io/fv3atm_sfc_io.F90 + io/fv3atm_oro_io.F90 + io/fv3atm_history_io.F90 + io/fv3atm_restart_io.F90) + ############################################################################### ### UFSATM with FV3 dynamical core ############################################################################### if (FV3) + add_definitions(-DFV3) set(DYCORE_TARGET ${DYCORE_TARGET_FV3} CACHE INTERNAL "DYCORE_TARGET Options: fv3atm") - set(DYCORE_TARGET_CAP_MOD fv3atm_cap_mod PARENT_SCOPE) - + set(DYCORE_TARGET_CAP_MOD ufsatm_cap_mod PARENT_SCOPE) # These ifdefs need to be turned ON in the dycore. set(use_WRTCOMP ON) @@ -83,7 +111,7 @@ if (FV3) if(INLINE_POST) set(BUILD_POSTEXEC OFF) add_subdirectory(upp) - set(POST_SRC fv3/io/post_nems_routines.F90 fv3/io/post_fv3.F90) + set(POST_SRC io/post_nems_routines.F90 io/post_fv3.F90) list(APPEND _ufsatm_defs_private INLINE_POST) endif() @@ -130,27 +158,15 @@ if (FV3) # FV3 drivers and dependencies add_library(${DYCORE_TARGET} + ufsatm_cap.F90 + ufsatm_util.F90 fv3/atmos_model.F90 - fv3/fv3_cap.F90 fv3/module_fv3_config.F90 fv3/module_fcst_grid_comp.F90 fv3/stochastic_physics/stochastic_physics_wrapper.F90 - cpl/module_block_data.F90 - cpl/module_cplfields.F90 - cpl/module_cap_cpl.F90 - cpl/module_cplscalars.F90 - fv3/io/fv3atm_common_io.F90 - fv3/io/fv3atm_clm_lake_io.F90 - fv3/io/fv3atm_rrfs_sd_io.F90 - fv3/io/fv3atm_sfc_io.F90 - fv3/io/fv3atm_oro_io.F90 - fv3/io/fv3atm_history_io.F90 - fv3/io/fv3atm_restart_io.F90 - fv3/io/module_write_netcdf.F90 - fv3/io/module_write_restart_netcdf.F90 - fv3/io/module_fv3_io_def.F90 - fv3/io/module_write_internal_state.F90 - fv3/io/module_wrt_grid_comp.F90 + ${coupling_srcs} + ${fv3_io_srcs} + ${io_srcs} ${moving_nest_srcs} ${POST_SRC} ) @@ -159,8 +175,75 @@ if (FV3) list(APPEND _ufsatm_defs_private GFS_PHYS INTERNAL_FILE_NML use_WRTCOMP) +else() + remove_definitions(-DFV3) endif() 
+############################################################################### +### UFSATM with MPAS dynamical core. +############################################################################### +if (MPAS) + add_definitions(-DMPAS) + set(DYCORE_TARGET ${DYCORE_TARGET_MPAS}) + + # Include MPAS Cmake tools. + include(${CMAKE_CURRENT_SOURCE_DIR}/mpas/MPAS-Model/cmake/Functions/MPAS_Functions.cmake) + + # Set any pre-processor directive needed in MPAS dycore. + get_mpas_version(MPAS_VERSION) + set(MPAS_ALL_CORES atmosphere) + set(MPAS_CORES atmosphere CACHE STRING "MPAS cores to build. Options: ${MPAS_ALL_CORES}") + if(MPAS_CORES MATCHES " ") #Convert strings separated with spaces to CMake list separated with ';' + string(REPLACE " " ";" MPAS_CORES ${MPAS_CORES}) + set(MPAS_CORES ${MPAS_CORES} CACHE STRING "MPAS cores to build. Options: ${MPAS_ALL_CORES}" FORCE) + endif() + set(MPAS_CAM_DYCORE TRUE) + set(MPAS_USE_PIO TRUE) + add_definitions(-DMPAS_USE_MPI_F08) + add_definitions(-DMPAS_PIO_SUPPORT) + add_definitions(-DMPAS_CAM_DYCORE) + add_definitions(-DMPAS_UFS_DYCORE) + add_definitions(-DSINGLE_PRECISION) + + # Source files for MPAS dynamical core drivers. 
+ set(MPAS_MAIN_SRC ${CMAKE_CURRENT_SOURCE_DIR}/mpas/MPAS-Model/src/driver/mpas.F) + set(MPAS_SUBDRIVER_SRC ${CMAKE_CURRENT_SOURCE_DIR}/mpas/MPAS-Model/src/driver/mpas_subdriver.F) + + # MPAS dynamical core + add_subdirectory(mpas) + + # MPAS drivers and dependencies + add_library(${DYCORE_TARGET} + ufsatm_cap.F90 + ufsatm_util.F90 + mpas/atmos_model.F90 + mpas/module_mpas_config.F90 + mpas/module_fcst_grid_comp.F90 + mpas/atmos_coupling.F90 + mpas/ufs_mpas_subdriver.F90 + ${coupling_srcs} + ${io_srcs} + ccpp/data/MPAS_typedefs.F90 + ccpp/driver/MPAS_init.F90 + ) + add_dependencies(${DYCORE_TARGET} mpas mpasccpp) + + if(NOT MPAS_GIT_VERSION) + find_package(Git QUIET) + if(GIT_FOUND) + execute_process(COMMAND ${GIT_EXECUTABLE} describe --dirty + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/mpas/MPAS-Model" + OUTPUT_VARIABLE _mpas_git_version + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + else() + set(_mpas_git_version "Unknown") + endif() + set(MPAS_GIT_VERSION ${_mpas_git_version} CACHE STRING "MPAS-Model git version") + message(STATUS "Setting MPAS_GIT_VERSION ${_mpas_git_version}") + endif() +else() + remove_definitions(-DMPAS) +endif() ############################################################################### ### Link libraries @@ -183,7 +266,11 @@ if (FV3) endif() endif() - +if (MPAS) + target_link_libraries(${DYCORE_TARGET} PUBLIC mpas + mpasccpp + fms) +endif() # Always include EMC libraries in dycore install target_link_libraries(${DYCORE_TARGET} PUBLIC w3emc::w3emc_d diff --git a/README.md b/README.md index 56ef866d11..97251daf3a 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ -# fv3atm +# ufsatm This repository contains a driver and key subcomponents of the atmospheric component of the NOAA's [Unified Forecast System @@ -7,19 +7,27 @@ atmospheric component of the NOAA's [Unified Forecast System The subcomponents include: - - The Finite-Volume Cubed-Sphere (FV3) dynamical core, originally -from the [Geophysical Fluid Dynamics 
-Laboratory](https://www.gfdl.noaa.gov/). - - The Common Community Physics Package (CCPP) supported by the +- The Finite-Volume Cubed-Sphere (FV3) dynamical core, originally + from the [Geophysical Fluid Dynamics Laboratory](https://www.gfdl.noaa.gov/). + +- The Model for Prediction Across Scales - Atmosphere (MPAS-A) dynamical + core developed by [NSF-NCAR Mesoscale and Micrometeorology Laboratory (MMM)](https://www.mmm.ucar.edu). + - [MPAS Model](https://github.com/ufs-community/MPAS-Model) + - [MPAS documentation](https://www.mmm.ucar.edu/models/mpas) + +- The Common Community Physics Package (CCPP) supported by the [Developmental Testbed Center (DTC)](https://dtcenter.org/community-code/common-community-physics-package-ccpp), including: - [CCPP Framework](https://github.com/NCAR/ccpp-framework). - [CCPP Physics](https://github.com/NCAR/ccpp-physics) - - wrapper code to call [UFS stochastic + +- wrapper code to call [UFS stochastic physics](https://stochastic-physics.readthedocs.io/en/latest/) - - The io code handles netCDF I/O. - - The cpl coupler code connects the different components and allows + +- The io code handles netCDF I/O. + +- The cpl coupler code connects the different components and allows them to communicate. 
## Prerequisites @@ -41,13 +49,13 @@ This package also requires the following external packages: - [ESMF](https://github.com/esmf-org/esmf) - [GFDL's Flexible Modeling System](https://github.com/NOAA-GFDL/FMS) -## Obtaining fv3atm +## Obtaining ufsatm -To obtain fv3atm, clone the git repository, and update the submodules: +To obtain ufsatm, clone the git repository, and update the submodules: ``` -git clone https://github.com/NOAA-EMC/fv3atm.git -cd fv3atm +git clone https://github.com/NOAA-EMC/ufsatm.git +cd ufsatm git submodule update --init --recursive ``` diff --git a/ccpp/CCPP_driver.F90 b/ccpp/CCPP_driver.F90 index 3d4a8e07f4..c71097b079 100644 --- a/ccpp/CCPP_driver.F90 +++ b/ccpp/CCPP_driver.F90 @@ -45,7 +45,7 @@ module CCPP_driver !------------------------------- ! CCPP step !------------------------------- - subroutine CCPP_step (step, nblks, ierr) + subroutine CCPP_step (step, nblks, ierr, dycore) #ifdef _OPENMP use omp_lib @@ -56,8 +56,9 @@ subroutine CCPP_step (step, nblks, ierr) character(len=*), intent(in) :: step integer, intent(in) :: nblks integer, intent(out) :: ierr + character(len=*), intent(in) :: dycore ! Local variables - integer :: nb, nt, ntX + integer :: nb, nt integer :: ierr2 integer :: kdt_iau logical :: iauwindow_center @@ -66,6 +67,7 @@ subroutine CCPP_step (step, nblks, ierr) ierr = 0 + ! CCPP Framework init (same for all dynamical cores) if (trim(step)=="init") then ! Get and set number of OpenMP threads (module @@ -105,7 +107,7 @@ subroutine CCPP_step (step, nblks, ierr) cdata_block(nb,nt)%thrd_cnt = nthrdsX end do end do - + ! Physics init (same for all dynamical cores) else if (trim(step)=="physics_init") then ! Since the physics init step is independent of the blocking structure, @@ -120,7 +122,7 @@ subroutine CCPP_step (step, nblks, ierr) return end if - ! Timestep init = time_vary + ! Timestep init = time_vary (dycore specific) else if (trim(step)=="timestep_init") then ! 
Since the physics timestep init step is independent of the blocking structure, @@ -135,20 +137,39 @@ subroutine CCPP_step (step, nblks, ierr) return end if - ! call timestep_init for "phys_ps"---required for Land IAU - call ccpp_physics_timestep_init(cdata_domain, suite_name=trim(ccpp_suite),group_name="phys_ps", ierr=ierr) - if (ierr/=0) then - write(0,'(a)') "An error occurred in ccpp_physics_timestep_init for group phys_ps" - write(0,'(a)') trim(cdata_domain%errmsg) - return - end if + if (trim(dycore)=='fv3') then + ! call timestep_init for "phys_ps"---required for Land IAU + call ccpp_physics_timestep_init(cdata_domain, suite_name=trim(ccpp_suite),group_name="phys_ps", ierr=ierr) + if (ierr/=0) then + write(0,'(a)') "An error occurred in ccpp_physics_timestep_init for group phys_ps" + write(0,'(a)') trim(cdata_domain%errmsg) + return + end if + + ! call timestep_init for "phys_ts"---required for Land IAU + call ccpp_physics_timestep_init(cdata_domain, suite_name=trim(ccpp_suite),group_name="phys_ts", ierr=ierr) + if (ierr/=0) then + write(0,'(a)') "An error occurred in ccpp_physics_timestep_init for group phys_ts" + write(0,'(a)') trim(cdata_domain%errmsg) + return + end if + endif - ! call timestep_init for "phys_ts"---required for Land IAU - call ccpp_physics_timestep_init(cdata_domain, suite_name=trim(ccpp_suite),group_name="phys_ts", ierr=ierr) - if (ierr/=0) then - write(0,'(a)') "An error occurred in ccpp_physics_timestep_init for group phys_ts" - write(0,'(a)') trim(cdata_domain%errmsg) - return + if (trim(dycore)=='mpas') then + ! 
Physics group + call ccpp_physics_timestep_init(cdata_domain, suite_name=trim(ccpp_suite),group_name="physics", ierr=ierr) + if (ierr/=0) then + write(0,'(a)') "An error occurred in ccpp_physics_timestep_init for group physics" + write(0,'(a)') trim(cdata_domain%errmsg) + return + end if + + call ccpp_physics_timestep_init(cdata_domain, suite_name=trim(ccpp_suite),group_name="microphysics", ierr=ierr) + if (ierr/=0) then + write(0,'(a)') "An error occurred in ccpp_physics_timestep_init for group microphysics" + write(0,'(a)') trim(cdata_domain%errmsg) + return + end if end if !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -187,7 +208,7 @@ subroutine CCPP_step (step, nblks, ierr) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! Radiation, physics and and stochastic physics - threaded regions using blocked data structures - else if (trim(step)=="radiation" .or. trim(step)=="physics" .or. trim(step)=="stochastics") then + else if (trim(step)=="radiation" .or. trim(step)=="physics" .or. trim(step)=="stochastics" .or. trim(step)=="microphysics") then ! Set number of threads available to physics schemes to one, ! because threads are used on the host model side for blocking @@ -195,10 +216,10 @@ subroutine CCPP_step (step, nblks, ierr) !$OMP parallel num_threads (nthrds) & !$OMP default (none) & -!$OMP shared (nblks, nthrdsX, non_uniform_blocks, & -!$OMP cdata_block, ccpp_suite, step, & -!$OMP GFS_Control, GFS_Interstitial) & -!$OMP private (nb, nt, ntX, ierr2) & +!$OMP shared (nblks, cdata_block, ccpp_suite, & +!$OMP step, GFS_Control, GFS_Interstitial,& +!$OMP dycore) & +!$OMP private (nb, nt, ierr2) & !$OMP reduction (+:ierr) #ifdef _OPENMP nt = omp_get_thread_num()+1 @@ -207,48 +228,71 @@ subroutine CCPP_step (step, nblks, ierr) #endif !$OMP do schedule (dynamic,1) do nb = 1,nblks - ! For non-uniform blocks/chunks, the last block/chunk has a different (shorter) - ! 
length than the other blocks/chunks; use special CCPP_Interstitial(nthrdsX) - if (non_uniform_blocks .and. nb==nblks) then - ntX = nthrdsX - else - ntX = nt - end if + ! Allocate physics interstitals for current thread + call GFS_Interstitial(nt)%create(ixs=GFS_control%chunk_begin(nb), ixe=GFS_control%chunk_end(nb), model=GFS_control) !--- Call CCPP radiation/physics/stochastics group if (trim(step)=="physics") then - ! Reset GFS_Interstitial DDT physics fields for this thread - call GFS_Interstitial(ntX)%phys_reset(GFS_control) - ! Process-split physics - call ccpp_physics_run(cdata_block(nb,ntX), suite_name=trim(ccpp_suite), group_name="phys_ps", ierr=ierr2) - if (ierr2/=0) then - write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", "phys_ps", & - ", block/chunk ", nb, " and thread ", nt, " (ntX=", ntX, "):" - write(0,'(a)') trim(cdata_block(nb,ntX)%errmsg) - ierr = ierr + ierr2 - endif - ! Time-split physics - call ccpp_physics_run(cdata_block(nb,ntX), suite_name=trim(ccpp_suite), group_name="phys_ts", ierr=ierr2) - if (ierr2/=0) then - write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", "phys_ts", & - ", block/chunk ", nb, " and thread ", nt, " (ntX=", ntX, "):" - write(0,'(a)') trim(cdata_block(nb,ntX)%errmsg) - ierr = ierr + ierr2 - endif + if (trim(dycore)=="fv3") then + ! Reset GFS_Interstitial DDT fields for this thread + call GFS_Interstitial(nt)%reset(GFS_control) + ! Process-split physics + call ccpp_physics_run(cdata_block(nb,nt), suite_name=trim(ccpp_suite), group_name="phys_ps", ierr=ierr2) + if (ierr2/=0) then + write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", "phys_ps", & + ", block/chunk ", nb, " and thread ", nt, " (nt=", nt, "):" + write(0,'(a)') trim(cdata_block(nb,nt)%errmsg) + ierr = ierr + ierr2 + endif + ! 
Time-split physics + call ccpp_physics_run(cdata_block(nb,nt), suite_name=trim(ccpp_suite), group_name="phys_ts", ierr=ierr2) + if (ierr2/=0) then + write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", "phys_ts", & + ", block/chunk ", nb, " and thread ", nt, " (nt=", nt, "):" + write(0,'(a)') trim(cdata_block(nb,nt)%errmsg) + ierr = ierr + ierr2 + endif + endif + if (trim(dycore)=="mpas") then + ! Physics + call ccpp_physics_run(cdata_block(nb,nt), suite_name=trim(ccpp_suite), group_name="physics", ierr=ierr2) + if (ierr2/=0) then + write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", "physics", & + ", block/chunk ", nb, " and thread ", nt, " (nt=", nt, "):" + write(0,'(a)') trim(cdata_block(nb,nt)%errmsg) + ierr = ierr + ierr2 + endif + endif else - if (trim(step)=="radiation") then - ! Reset GFS_Interstitial DDT radiation fields for this thread - call GFS_Interstitial(ntX)%rad_reset(GFS_control) - end if - ! Radiation - call ccpp_physics_run(cdata_block(nb,ntX), suite_name=trim(ccpp_suite), group_name=trim(step), ierr=ierr2) - if (ierr2/=0) then - write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", trim(step), & - ", block/chunk ", nb, " and thread ", nt, " (ntX=", ntX, "):" - write(0,'(a)') trim(cdata_block(nb,ntX)%errmsg) - ierr = ierr + ierr2 - endif - end if - end do + if (trim(step)=="radiation") then + ! Reset GFS_Interstitial DDT fields for this thread + call GFS_Interstitial(nt)%reset(GFS_control) + endif + ! Radiation + call ccpp_physics_run(cdata_block(nb,nt), suite_name=trim(ccpp_suite), group_name=trim(step), ierr=ierr2) + if (ierr2/=0) then + write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", trim(step), & + ", block/chunk ", nb, " and thread ", nt, " (nt=", nt, "):" + write(0,'(a)') trim(cdata_block(nb,nt)%errmsg) + ierr = ierr + ierr2 + endif + ! 
Microphysics (MPAS only) + if (trim(step)=="microphysics") then + if (trim(dycore)=="mpas") then + call ccpp_physics_run(cdata_block(nb,nt), suite_name=trim(ccpp_suite), group_name="microphysics", ierr=ierr2) + if (ierr2/=0) then + write(0,'(2a,3(a,i4),a)') "An error occurred in ccpp_physics_run for group ", "microphysics", & + ", block/chunk ", nb, " and thread ", nt, " (nt=", nt, "):" + write(0,'(a)') trim(cdata_block(nb,nt)%errmsg) + ierr = ierr + ierr2 + endif + else + write(0,'(a)') "An error occurred in ccpp_physics_run for group microphysics. Group microphysics only valid with MPAS dycore." + ierr = ierr + 1 + endif + endif + endif + call GFS_Interstitial(nt)%destroy(GFS_control) + end do !$OMP end do !$OMP end parallel @@ -269,23 +313,40 @@ subroutine CCPP_step (step, nblks, ierr) return end if - ! call timestep_finalize for "phys_ps"---required for Land IAU - call ccpp_physics_timestep_finalize(cdata_domain, suite_name=trim(ccpp_suite), group_name="phys_ps", ierr=ierr) - if (ierr/=0) then - write(0,'(a)') "An error occurred in ccpp_physics_timestep_finalize for group phys_ps" - write(0,'(a)') trim(cdata_domain%errmsg) - return - end if - - ! call timestep_finalize for "phys_ts"---required for Land IAU - call ccpp_physics_timestep_finalize(cdata_domain, suite_name=trim(ccpp_suite), group_name="phys_ts", ierr=ierr) - if (ierr/=0) then - write(0,'(a)') "An error occurred in ccpp_physics_timestep_finalize for group phys_ts" - write(0,'(a)') trim(cdata_domain%errmsg) - return - end if + if (trim(dycore)=='fv3') then + ! call timestep_finalize for "phys_ps"---required for Land IAU + call ccpp_physics_timestep_finalize(cdata_domain, suite_name=trim(ccpp_suite), group_name="phys_ps", ierr=ierr) + if (ierr/=0) then + write(0,'(a)') "An error occurred in ccpp_physics_timestep_finalize for group phys_ps" + write(0,'(a)') trim(cdata_domain%errmsg) + return + end if + + ! 
call timestep_finalize for "phys_ts"---required for Land IAU + call ccpp_physics_timestep_finalize(cdata_domain, suite_name=trim(ccpp_suite), group_name="phys_ts", ierr=ierr) + if (ierr/=0) then + write(0,'(a)') "An error occurred in ccpp_physics_timestep_finalize for group phys_ts" + write(0,'(a)') trim(cdata_domain%errmsg) + return + end if + endif + if (trim(dycore)=='mpas') then + call ccpp_physics_timestep_finalize(cdata_domain, suite_name=trim(ccpp_suite), group_name="physics", ierr=ierr) + if (ierr/=0) then + write(0,'(a)') "An error occurred in ccpp_physics_timestep_finalize for group physics" + write(0,'(a)') trim(cdata_domain%errmsg) + return + end if + + call ccpp_physics_timestep_finalize(cdata_domain, suite_name=trim(ccpp_suite), group_name="microphysics", ierr=ierr) + if (ierr/=0) then + write(0,'(a)') "An error occurred in ccpp_physics_timestep_finalize for group microphysics" + write(0,'(a)') trim(cdata_domain%errmsg) + return + end if + endif - ! Physics finalize + ! Physics finalize (same for all dynamical cores) else if (trim(step)=="physics_finalize") then ! Since the physics finalize step is independent of the blocking structure, @@ -300,7 +361,7 @@ subroutine CCPP_step (step, nblks, ierr) return end if - ! Finalize + ! Finalize (same for all dynamical cores) else if (trim(step)=="finalize") then ! Deallocate cdata structure for blocks and threads if (allocated(cdata_block)) deallocate(cdata_block) diff --git a/ccpp/CMakeLists.txt b/ccpp/CMakeLists.txt index f0554c57ed..5eba68b73f 100644 --- a/ccpp/CMakeLists.txt +++ b/ccpp/CMakeLists.txt @@ -1,27 +1,41 @@ -cmake_minimum_required(VERSION 3.0) +cmake_minimum_required(VERSION 3.19) project(CCPP-UFS LANGUAGES C CXX Fortran) set(PROJECT "CCPP-UFS") +#------------------------------------------------------------------------------ +# Which dycore are we coupling the CCPP to? 
+#------------------------------------------------------------------------------ + #------------------------------------------------------------------------------ # FV3 dynamical core #------------------------------------------------------------------------------ if (FV3) message(STATUS "Build CCPP interface to FV3 dynamical core") set(CCPP_TARGET fv3ccpp) - set(CCPP_PREBUILD_CONFIG config/ccpp_prebuild_config.py) + set(CCPP_PREBUILD_CONFIG config/ccpp_prebuild_config_fv3.py) set(DYCORE_CCPP_SRCS driver/GFS_diagnostics.F90 driver/GFS_restart.F90 driver/GFS_init.F90 CCPP_driver.F90 ) - #list(TRANSFORM DYCORE_CCPP_SRCS PREPEND ../ccpp/driver/) # Add dycore-specific preprocessor flag (needed for some physics schemes) add_definitions(-DFV3) +endif() +#------------------------------------------------------------------------------ +# MPAS dynamical core +#------------------------------------------------------------------------------ +if (MPAS) + message(STATUS "Build CCPP interface to MPAS dynamical core") + set(CCPP_TARGET mpasccpp) + set(CCPP_PREBUILD_CONFIG config/ccpp_prebuild_config_mpas.py) + set(DYCORE_CCPP_SRCS + CCPP_driver.F90 + ) endif() #------------------------------------------------------------------------------ @@ -98,6 +112,22 @@ else(CCPP_32BIT) endif() endif(CCPP_32BIT) +#------------------------------------------------------------------------------ +# Set flags for rte-rrtmgp radiation +if(RRTMGP_32BIT) + message(STATUS "Compile CCPP RTE-RRTMGP with 32-bit precision") + add_definitions(-DRTE_USE_SP) + if(CMAKE_Fortran_COMPILER_ID MATCHES "Intel") + set(CMAKE_Fortran_FLAGS_RTERRTMGP "-real-size 32") + elseif(CMAKE_Fortran_COMPILER_ID MATCHES "GNU") + set(CMAKE_Fortran_FLAGS_RTERRTMGP "-fno-default-real-8 -fdefault-double-8") + endif() +else() + message(STATUS "Compile CCPP RTE-RRTMGP with 64-bit precision") + remove_definitions(-DRTE_USE_SP) + set(CMAKE_Fortran_FLAGS_RTERRTMGP ${CMAKE_Fortran_FLAGS_PHYSICS}) +endif() + 
#------------------------------------------------------------------------------ # Add model-specific flags for C/C++/Fortran preprocessor if(NOT HYDRO) @@ -115,6 +145,7 @@ endif() #------------------------------------------------------------------------------ # Build CCPP framework and physics +set(BUILD_SHARED_LIBS OFF) add_subdirectory(framework) add_subdirectory(physics) @@ -130,7 +161,7 @@ add_library( # Compile GFS_diagnostics.F90 without optimization, this leads to out of memory errors on wcoss_dell_p3 if (FV3) - set_property(SOURCE ../fv3/ccpp/driver/GFS_diagnostics.F90 APPEND_STRING PROPERTY COMPILE_FLAGS "-O0") + set_property(SOURCE driver/GFS_diagnostics.F90 APPEND_STRING PROPERTY COMPILE_FLAGS "-O0") endif() target_link_libraries(${CCPP_TARGET} PUBLIC ccpp_framework) diff --git a/ccpp/config/ccpp_prebuild_config.py b/ccpp/config/ccpp_prebuild_config_fv3.py similarity index 98% rename from ccpp/config/ccpp_prebuild_config.py rename to ccpp/config/ccpp_prebuild_config_fv3.py index 04a45711cd..968c48a772 100755 --- a/ccpp/config/ccpp_prebuild_config.py +++ b/ccpp/config/ccpp_prebuild_config_fv3.py @@ -104,7 +104,7 @@ 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_photochemistry.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rad_time_vary.fv3.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_radiation_surface.F90', - 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmg_post.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_radiation_post.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmg_pre.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmg_setup.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmgp_setup.F90', @@ -112,7 +112,6 @@ 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_cloud_diagnostics.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmgp_cloud_mp.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmgp_cloud_overlap.F90', - 
'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmgp_post.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_stochastics.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_suite_interstitial_1.F90', 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_suite_interstitial_2.F90', diff --git a/ccpp/config/ccpp_prebuild_config_mpas.py b/ccpp/config/ccpp_prebuild_config_mpas.py new file mode 100755 index 0000000000..0403c714a4 --- /dev/null +++ b/ccpp/config/ccpp_prebuild_config_mpas.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python + +# CCPP prebuild config for MPAS - Model for Prediction Across Scales + + +############################################################################### +# Definitions # +############################################################################### + +HOST_MODEL_IDENTIFIER = "MPAS" + +# Add all files with metadata tables on the host model side and in CCPP, +# relative to basedir = top-level directory of host model. This includes +# kind and type definitions used in CCPP physics. Also add any internal +# dependencies of these files to the list. 
+VARIABLE_DEFINITION_FILES = [
+    # actual variable definition files
+    'framework/src/ccpp_types.F90',
+    'physics/physics/hooks/machine.F',
+    'physics/physics/Radiation/RRTMG/radsw_param.f',
+    'physics/physics/Radiation/RRTMG/radlw_param.f',
+    'physics/physics/photochem/module_ozphys.F90',
+    'physics/physics/MP/TEMPO/TEMPO/module_mp_tempo_params.F90',
+    'physics/physics/photochem/module_h2ophys.F90',
+    'physics/physics/SFC_Models/Land/Noahmp/lnd_iau_mod.F90',
+    '../ccpp/data/CCPP_typedefs.F90',
+    '../ccpp/data/GFS_typedefs.F90',
+    '../ccpp/data/MPAS_typedefs.F90',
+    '../ccpp/data/CCPP_data.F90'
+    ]
+
+TYPEDEFS_NEW_METADATA = {
+    'ccpp_types' : {
+        'ccpp_t' : 'cdata',
+        'MPI_Comm' : '',
+        'ccpp_types' : '',
+        },
+    'machine' : {
+        'machine' : '',
+        },
+    'module_radsw_parameters' : {
+        'module_radsw_parameters' : '',
+        },
+    'module_radlw_parameters' : {
+        'module_radlw_parameters' : '',
+        },
+    'module_ozphys' : {
+        'module_ozphys' : '',
+        'ty_ozphys' : '',
+        },
+    'module_mp_tempo_params' : {
+        'module_mp_tempo_params' : '',
+        'ty_tempo_cfg' : '',
+        },
+    'module_h2ophys' : {
+        'module_h2ophys' : '',
+        'ty_h2ophys' : '',
+        },
+    'land_iau_mod' : {
+        'land_iau_mod' : '',
+        'land_iau_external_data_type' : '',
+        'land_iau_state_type' : '',
+        'land_iau_control_type' : '',
+        },
+    'CCPP_typedefs' : {
+        'GFS_interstitial_type' : 'GFS_Interstitial(cdata%thrd_no)',
+        'GFDL_interstitial_type' : 'GFDL_interstitial',
+        'CCPP_typedefs' : '',
+        },
+    'CCPP_data' : {
+        'CCPP_data' : '',
+        },
+    'MPAS_typedefs' : {
+        'MPAS_typedefs' : '',
+        },
+    'GFS_typedefs' : {
+        'GFS_control_type' : 'GFS_Control',
+        'GFS_statein_type' : 'GFS_Statein',
+        'GFS_stateout_type' : 'GFS_Stateout',
+        'GFS_grid_type' : 'GFS_Grid',
+        'GFS_tbd_type' : 'GFS_Tbd',
+        'GFS_cldprop_type' : 'GFS_Cldprop',
+        'GFS_sfcprop_type' : 'GFS_Sfcprop',
+        'GFS_radtend_type' : 'GFS_Radtend',
+        'GFS_coupling_type' : 'GFS_Coupling',
+        'GFS_diag_type' : 'GFS_Intdiag',
+        'GFS_typedefs' : '',
+        },
+    }
+
+# Add all physics scheme files 
relative to basedir +SCHEME_FILES = [ + # Relative path to source (from where ccpp_prebuild.py is called) : [ list of physics sets in which scheme may be called ]; + # current restrictions are that each scheme can only belong to one physics set, and all schemes within one group in the + # suite definition file have to belong to the same physics set + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_phys_time_vary.fv3.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rad_time_vary.mpas.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_time_vary_pre.fv3.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_radiation_surface.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_radiation_post.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmg_pre.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmg_setup.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmgp_setup.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmgp_pre.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_cloud_diagnostics.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmgp_cloud_mp.F90', + 'physics/physics/Interstitials/UFS_SCM_NEPTUNE/GFS_rrtmgp_cloud_overlap.F90', + 'physics/physics/Radiation/RRTMG/radsw_main.F90', + 'physics/physics/Radiation/RRTMG/radlw_main.F90', + 'physics/physics/Radiation/RRTMG/rrtmg_lw_post.F90', + 'physics/physics/Radiation/RRTMG/rrtmg_sw_post.F90', + 'physics/physics/Radiation/RRTMG/rad_sw_pre.F90', + 'physics/physics/Radiation/RRTMGP/rrtmgp_aerosol_optics.F90', + 'physics/physics/Radiation/RRTMGP/rrtmgp_lw_main.F90', + 'physics/physics/Radiation/RRTMGP/rrtmgp_sw_main.F90', + 'physics/physics/photochem/module_h2ophys.F90', + 'physics/physics/photochem/module_ozphys.F90' +] + +# Default build dir, relative to current working directory, +# if not specified as command-line argument +DEFAULT_BUILD_DIR = 'build' + +# Auto-generated makefile/cmakefile snippets that contain all 
type definitions +TYPEDEFS_MAKEFILE = '{build_dir}/physics/CCPP_TYPEDEFS.mk' +TYPEDEFS_CMAKEFILE = '{build_dir}/physics/CCPP_TYPEDEFS.cmake' +TYPEDEFS_SOURCEFILE = '{build_dir}/physics/CCPP_TYPEDEFS.sh' + +# Auto-generated makefile/cmakefile snippets that contain all schemes +SCHEMES_MAKEFILE = '{build_dir}/physics/CCPP_SCHEMES.mk' +SCHEMES_CMAKEFILE = '{build_dir}/physics/CCPP_SCHEMES.cmake' +SCHEMES_SOURCEFILE = '{build_dir}/physics/CCPP_SCHEMES.sh' + +# Auto-generated makefile/cmakefile snippets that contain all caps +CAPS_MAKEFILE = '{build_dir}/physics/CCPP_CAPS.mk' +CAPS_CMAKEFILE = '{build_dir}/physics/CCPP_CAPS.cmake' +CAPS_SOURCEFILE = '{build_dir}/physics/CCPP_CAPS.sh' + +# Directory where to put all auto-generated physics caps +CAPS_DIR = '{build_dir}/physics' + +# Directory where the suite definition files are stored +SUITES_DIR = '../ccpp/suites' + +# Directory where to write static API to +STATIC_API_DIR = '{build_dir}/physics' +STATIC_API_CMAKEFILE = '{build_dir}/physics/CCPP_STATIC_API.cmake' +STATIC_API_SOURCEFILE = '{build_dir}/physics/CCPP_STATIC_API.sh' + +# Directory for writing HTML pages generated from metadata files +# used by metadata2html.py for generating scientific documentation +METADATA_HTML_OUTPUT_DIR = '{build_dir}/physics/physics/docs' + +# HTML document containing the model-defined CCPP variables +HTML_VARTABLE_FILE = '{build_dir}/physics/CCPP_VARIABLES_MPAS.html' + +# LaTeX document containing the provided vs requested CCPP variables +LATEX_VARTABLE_FILE = '{build_dir}/framework/doc/DevelopersGuide/CCPP_VARIABLES_MPAS.tex' diff --git a/ccpp/data/CCPP_typedefs.F90 b/ccpp/data/CCPP_typedefs.F90 index d9aaebf3b2..8c18aa481c 100644 --- a/ccpp/data/CCPP_typedefs.F90 +++ b/ccpp/data/CCPP_typedefs.F90 @@ -89,7 +89,6 @@ module CCPP_typedefs real (kind=kind_phys), pointer :: cnv_ndrop(:,:) => null() !< real (kind=kind_phys), pointer :: cnv_nice(:,:) => null() !< real (kind=kind_phys), pointer :: cnvc(:,:) => null() !< - real 
(kind=kind_phys), pointer :: cnvw(:,:) => null() !< real (kind=kind_phys), pointer :: ctei_r(:) => null() !< real (kind=kind_phys), pointer :: ctei_rml(:) => null() !< real (kind=kind_phys), pointer :: cumabs(:) => null() !< @@ -413,8 +412,8 @@ module CCPP_typedefs contains procedure :: create => gfs_interstitial_create !< allocate array data - procedure :: rad_reset => gfs_interstitial_rad_reset !< reset array data for radiation - procedure :: phys_reset => gfs_interstitial_phys_reset !< reset array data for physics + procedure :: destroy => gfs_interstitial_destroy !< deallocate array data + procedure :: reset => gfs_interstitial_reset !< reset array data end type GFS_interstitial_type @@ -499,12 +498,12 @@ module CCPP_typedefs ! GFS_interstitial_type !---------------------- - subroutine gfs_interstitial_create (Interstitial, IM, Model) + subroutine gfs_interstitial_create (Interstitial, ixs, ixe, Model) ! implicit none ! class(GFS_interstitial_type) :: Interstitial - integer, intent(in) :: IM + integer, intent(in) :: ixs, ixe type(GFS_control_type), intent(in) :: Model integer :: iGas ! @@ -514,344 +513,343 @@ subroutine gfs_interstitial_create (Interstitial, IM, Model) ! Interstitial%{nvdiff,mg3_as_mg2,nn,tracers_total,ntcwx,ntiwx,ntk,ntkev,otspt,nsamftrac,ncstrac,nscav} call gfs_interstitial_setup_tracers(Interstitial, Model) ! 
Allocate arrays - allocate (Interstitial%adjsfculw_land (IM)) - allocate (Interstitial%adjsfculw_ice (IM)) - allocate (Interstitial%adjsfculw_water (IM)) - allocate (Interstitial%adjnirbmd (IM)) - allocate (Interstitial%adjnirbmu (IM)) - allocate (Interstitial%adjnirdfd (IM)) - allocate (Interstitial%adjnirdfu (IM)) - allocate (Interstitial%adjvisbmd (IM)) - allocate (Interstitial%adjvisbmu (IM)) - allocate (Interstitial%adjvisdfu (IM)) - allocate (Interstitial%adjvisdfd (IM)) - allocate (Interstitial%aerodp (IM,NSPC1)) - allocate (Interstitial%alb1d (IM)) + allocate (Interstitial%adjsfculw_land (ixs:ixe)) + allocate (Interstitial%adjsfculw_ice (ixs:ixe)) + allocate (Interstitial%adjsfculw_water (ixs:ixe)) + allocate (Interstitial%adjnirbmd (ixs:ixe)) + allocate (Interstitial%adjnirbmu (ixs:ixe)) + allocate (Interstitial%adjnirdfd (ixs:ixe)) + allocate (Interstitial%adjnirdfu (ixs:ixe)) + allocate (Interstitial%adjvisbmd (ixs:ixe)) + allocate (Interstitial%adjvisbmu (ixs:ixe)) + allocate (Interstitial%adjvisdfu (ixs:ixe)) + allocate (Interstitial%adjvisdfd (ixs:ixe)) + allocate (Interstitial%aerodp (ixs:ixe,NSPC1)) + allocate (Interstitial%alb1d (ixs:ixe)) if (.not. Model%do_RRTMGP) then ! 
RRTMGP uses its own cloud_overlap_param - allocate (Interstitial%alpha (IM,Model%levr+LTP)) + allocate (Interstitial%alpha (ixs:ixe,Model%levr+LTP)) end if - allocate (Interstitial%bexp1d (IM)) - allocate (Interstitial%cd (IM)) - allocate (Interstitial%cd_ice (IM)) - allocate (Interstitial%cd_land (IM)) - allocate (Interstitial%cd_water (IM)) - allocate (Interstitial%cdq (IM)) - allocate (Interstitial%cdq_ice (IM)) - allocate (Interstitial%cdq_land (IM)) - allocate (Interstitial%cdq_water (IM)) - allocate (Interstitial%chh_ice (IM)) - allocate (Interstitial%chh_land (IM)) - allocate (Interstitial%chh_water (IM)) - allocate (Interstitial%cldf (IM)) - allocate (Interstitial%cldsa (IM,5)) - allocate (Interstitial%cldtaulw (IM,Model%levr+LTP)) - allocate (Interstitial%cldtausw (IM,Model%levr+LTP)) - allocate (Interstitial%cld1d (IM)) - allocate (Interstitial%clouds (IM,Model%levr+LTP,NF_CLDS)) - allocate (Interstitial%clw (IM,Model%levs,Interstitial%nn)) - allocate (Interstitial%clx (IM,4)) - allocate (Interstitial%cmm_ice (IM)) - allocate (Interstitial%cmm_land (IM)) - allocate (Interstitial%cmm_water (IM)) - allocate (Interstitial%cnvc (IM,Model%levs)) - allocate (Interstitial%cnvw (IM,Model%levs)) - allocate (Interstitial%ctei_r (IM)) - allocate (Interstitial%ctei_rml (IM)) - allocate (Interstitial%cumabs (IM)) - allocate (Interstitial%dd_mf (IM,Model%levs)) - allocate (Interstitial%de_lgth (IM)) - allocate (Interstitial%del (IM,Model%levs)) - allocate (Interstitial%del_gz (IM,Model%levs+1)) - allocate (Interstitial%delr (IM,Model%levr+LTP)) - allocate (Interstitial%dlength (IM)) - allocate (Interstitial%dqdt (IM,Model%levs,Model%ntrac)) - allocate (Interstitial%dqsfc1 (IM)) - allocate (Interstitial%drain (IM)) - allocate (Interstitial%dtdt (IM,Model%levs)) - allocate (Interstitial%dtsfc1 (IM)) - allocate (Interstitial%dt_mf (IM,Model%levs)) - allocate (Interstitial%dtzm (IM)) - allocate (Interstitial%dudt (IM,Model%levs)) - allocate (Interstitial%dusfcg (IM)) - 
allocate (Interstitial%dusfc1 (IM)) - allocate (Interstitial%dvdt (IM,Model%levs)) - allocate (Interstitial%dvsfcg (IM)) - allocate (Interstitial%dvsfc1 (IM)) - allocate (Interstitial%dvdftra (IM,Model%levs,Interstitial%nvdiff)) - allocate (Interstitial%dzlyr (IM,Model%levr+LTP)) - allocate (Interstitial%elvmax (IM)) - allocate (Interstitial%ep1d (IM)) - allocate (Interstitial%ep1d_ice (IM)) - allocate (Interstitial%ep1d_land (IM)) - allocate (Interstitial%ep1d_water (IM)) - allocate (Interstitial%evap_ice (IM)) - allocate (Interstitial%evap_land (IM)) - allocate (Interstitial%evap_water (IM)) - allocate (Interstitial%evbs (IM)) - allocate (Interstitial%evcw (IM)) - allocate (Interstitial%pah (IM)) - allocate (Interstitial%ecan (IM)) - allocate (Interstitial%etran (IM)) - allocate (Interstitial%edir (IM)) - allocate (Interstitial%faerlw (IM,Model%levr+LTP,NBDLW,NF_AELW)) - allocate (Interstitial%faersw (IM,Model%levr+LTP,NBDSW,NF_AESW)) - allocate (Interstitial%ffhh_ice (IM)) - allocate (Interstitial%ffhh_land (IM)) - allocate (Interstitial%ffhh_water (IM)) - allocate (Interstitial%fh2 (IM)) - allocate (Interstitial%fh2_ice (IM)) - allocate (Interstitial%fh2_land (IM)) - allocate (Interstitial%fh2_water (IM)) - allocate (Interstitial%flag_cice (IM)) - allocate (Interstitial%flag_guess (IM)) - allocate (Interstitial%flag_iter (IM)) - allocate (Interstitial%flag_lakefreeze (IM)) - allocate (Interstitial%ffmm_ice (IM)) - allocate (Interstitial%ffmm_land (IM)) - allocate (Interstitial%ffmm_water (IM)) - allocate (Interstitial%fm10 (IM)) - allocate (Interstitial%fm10_ice (IM)) - allocate (Interstitial%fm10_land (IM)) - allocate (Interstitial%fm10_water (IM)) - allocate (Interstitial%frland (IM)) + allocate (Interstitial%bexp1d (ixs:ixe)) + allocate (Interstitial%cd (ixs:ixe)) + allocate (Interstitial%cd_ice (ixs:ixe)) + allocate (Interstitial%cd_land (ixs:ixe)) + allocate (Interstitial%cd_water (ixs:ixe)) + allocate (Interstitial%cdq (ixs:ixe)) + allocate 
(Interstitial%cdq_ice (ixs:ixe)) + allocate (Interstitial%cdq_land (ixs:ixe)) + allocate (Interstitial%cdq_water (ixs:ixe)) + allocate (Interstitial%chh_ice (ixs:ixe)) + allocate (Interstitial%chh_land (ixs:ixe)) + allocate (Interstitial%chh_water (ixs:ixe)) + allocate (Interstitial%cldf (ixs:ixe)) + allocate (Interstitial%cldsa (ixs:ixe,5)) + allocate (Interstitial%cldtaulw (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%cldtausw (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%cld1d (ixs:ixe)) + allocate (Interstitial%clouds (ixs:ixe,Model%levr+LTP,NF_CLDS)) + allocate (Interstitial%clw (ixs:ixe,Model%levs,Interstitial%nn)) + allocate (Interstitial%clx (ixs:ixe,4)) + allocate (Interstitial%cmm_ice (ixs:ixe)) + allocate (Interstitial%cmm_land (ixs:ixe)) + allocate (Interstitial%cmm_water (ixs:ixe)) + allocate (Interstitial%cnvc (ixs:ixe,Model%levs)) + allocate (Interstitial%ctei_r (ixs:ixe)) + allocate (Interstitial%ctei_rml (ixs:ixe)) + allocate (Interstitial%cumabs (ixs:ixe)) + allocate (Interstitial%dd_mf (ixs:ixe,Model%levs)) + allocate (Interstitial%de_lgth (ixs:ixe)) + allocate (Interstitial%del (ixs:ixe,Model%levs)) + allocate (Interstitial%del_gz (ixs:ixe,Model%levs+1)) + allocate (Interstitial%delr (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%dlength (ixs:ixe)) + allocate (Interstitial%dqdt (ixs:ixe,Model%levs,Model%ntrac)) + allocate (Interstitial%dqsfc1 (ixs:ixe)) + allocate (Interstitial%drain (ixs:ixe)) + allocate (Interstitial%dtdt (ixs:ixe,Model%levs)) + allocate (Interstitial%dtsfc1 (ixs:ixe)) + allocate (Interstitial%dt_mf (ixs:ixe,Model%levs)) + allocate (Interstitial%dtzm (ixs:ixe)) + allocate (Interstitial%dudt (ixs:ixe,Model%levs)) + allocate (Interstitial%dusfcg (ixs:ixe)) + allocate (Interstitial%dusfc1 (ixs:ixe)) + allocate (Interstitial%dvdt (ixs:ixe,Model%levs)) + allocate (Interstitial%dvsfcg (ixs:ixe)) + allocate (Interstitial%dvsfc1 (ixs:ixe)) + allocate (Interstitial%dvdftra (ixs:ixe,Model%levs,Interstitial%nvdiff)) + 
allocate (Interstitial%dzlyr (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%elvmax (ixs:ixe)) + allocate (Interstitial%ep1d (ixs:ixe)) + allocate (Interstitial%ep1d_ice (ixs:ixe)) + allocate (Interstitial%ep1d_land (ixs:ixe)) + allocate (Interstitial%ep1d_water (ixs:ixe)) + allocate (Interstitial%evap_ice (ixs:ixe)) + allocate (Interstitial%evap_land (ixs:ixe)) + allocate (Interstitial%evap_water (ixs:ixe)) + allocate (Interstitial%evbs (ixs:ixe)) + allocate (Interstitial%evcw (ixs:ixe)) + allocate (Interstitial%pah (ixs:ixe)) + allocate (Interstitial%ecan (ixs:ixe)) + allocate (Interstitial%etran (ixs:ixe)) + allocate (Interstitial%edir (ixs:ixe)) + allocate (Interstitial%faerlw (ixs:ixe,Model%levr+LTP,NBDLW,NF_AELW)) + allocate (Interstitial%faersw (ixs:ixe,Model%levr+LTP,NBDSW,NF_AESW)) + allocate (Interstitial%ffhh_ice (ixs:ixe)) + allocate (Interstitial%ffhh_land (ixs:ixe)) + allocate (Interstitial%ffhh_water (ixs:ixe)) + allocate (Interstitial%fh2 (ixs:ixe)) + allocate (Interstitial%fh2_ice (ixs:ixe)) + allocate (Interstitial%fh2_land (ixs:ixe)) + allocate (Interstitial%fh2_water (ixs:ixe)) + allocate (Interstitial%flag_cice (ixs:ixe)) + allocate (Interstitial%flag_guess (ixs:ixe)) + allocate (Interstitial%flag_iter (ixs:ixe)) + allocate (Interstitial%flag_lakefreeze (ixs:ixe)) + allocate (Interstitial%ffmm_ice (ixs:ixe)) + allocate (Interstitial%ffmm_land (ixs:ixe)) + allocate (Interstitial%ffmm_water (ixs:ixe)) + allocate (Interstitial%fm10 (ixs:ixe)) + allocate (Interstitial%fm10_ice (ixs:ixe)) + allocate (Interstitial%fm10_land (ixs:ixe)) + allocate (Interstitial%fm10_water (ixs:ixe)) + allocate (Interstitial%frland (ixs:ixe)) allocate (Interstitial%fscav (Interstitial%nscav)) allocate (Interstitial%fswtr (Interstitial%nscav)) - allocate (Interstitial%gabsbdlw (IM)) - allocate (Interstitial%gabsbdlw_ice (IM)) - allocate (Interstitial%gabsbdlw_land (IM)) - allocate (Interstitial%gabsbdlw_water (IM)) - allocate (Interstitial%gamma (IM)) - allocate 
(Interstitial%gamq (IM)) - allocate (Interstitial%gamt (IM)) - allocate (Interstitial%gasvmr (IM,Model%levr+LTP,NF_VGAS)) - allocate (Interstitial%gflx (IM)) - allocate (Interstitial%gflx_ice (IM)) - allocate (Interstitial%gflx_land (IM)) - allocate (Interstitial%gflx_water (IM)) - allocate (Interstitial%gwdcu (IM,Model%levs)) - allocate (Interstitial%gwdcv (IM,Model%levs)) - allocate (Interstitial%zvfun (IM)) - allocate (Interstitial%hffac (IM)) - allocate (Interstitial%hflxq (IM)) - allocate (Interstitial%hflx_ice (IM)) - allocate (Interstitial%hflx_land (IM)) - allocate (Interstitial%hflx_water (IM)) - allocate (Interstitial%htlwc (IM,Model%levr+LTP)) - allocate (Interstitial%htlw0 (IM,Model%levr+LTP)) - allocate (Interstitial%htswc (IM,Model%levr+LTP)) - allocate (Interstitial%htsw0 (IM,Model%levr+LTP)) - allocate (Interstitial%dry (IM)) - allocate (Interstitial%idxday (IM)) - allocate (Interstitial%icy (IM)) - allocate (Interstitial%lake (IM)) - allocate (Interstitial%ocean (IM)) - allocate (Interstitial%islmsk (IM)) - allocate (Interstitial%islmsk_cice (IM)) - allocate (Interstitial%wet (IM)) - allocate (Interstitial%kbot (IM)) - allocate (Interstitial%kcnv (IM)) - allocate (Interstitial%kinver (IM)) - allocate (Interstitial%kpbl (IM)) - allocate (Interstitial%ktop (IM)) - allocate (Interstitial%mbota (IM,3)) - allocate (Interstitial%mtopa (IM,3)) - allocate (Interstitial%oa4 (IM,4)) - allocate (Interstitial%oc (IM)) - allocate (Interstitial%olyr (IM,Model%levr+LTP)) - allocate (Interstitial%plvl (IM,Model%levr+1+LTP)) - allocate (Interstitial%plyr (IM,Model%levr+LTP)) - allocate (Interstitial%prnum (IM,Model%levs)) - allocate (Interstitial%qlyr (IM,Model%levr+LTP)) - allocate (Interstitial%prcpmp (IM)) - allocate (Interstitial%qss_ice (IM)) - allocate (Interstitial%qss_land (IM)) - allocate (Interstitial%qss_water (IM)) - allocate (Interstitial%raincd (IM)) - allocate (Interstitial%raincs (IM)) - allocate (Interstitial%rainmcadj (IM)) - allocate 
(Interstitial%rainp (IM,Model%levs)) - allocate (Interstitial%rb (IM)) - allocate (Interstitial%rb_ice (IM)) - allocate (Interstitial%rb_land (IM)) - allocate (Interstitial%rb_water (IM)) - allocate (Interstitial%rhc (IM,Model%levs)) - allocate (Interstitial%runoff (IM)) - allocate (Interstitial%save_q (IM,Model%levs,Model%ntrac)) - allocate (Interstitial%save_t (IM,Model%levs)) - allocate (Interstitial%save_tcp (IM,Model%levs)) - allocate (Interstitial%save_u (IM,Model%levs)) - allocate (Interstitial%save_v (IM,Model%levs)) - allocate (Interstitial%sbsno (IM)) - allocate (Interstitial%scmpsw (IM)) - allocate (Interstitial%sfcalb (IM,NF_ALBD)) - allocate (Interstitial%sigma (IM)) - allocate (Interstitial%sigmaf (IM)) - allocate (Interstitial%sigmafrac (IM,Model%levs)) - allocate (Interstitial%sigmatot (IM,Model%levs+1)) - allocate (Interstitial%snowc (IM)) - allocate (Interstitial%snohf (IM)) - allocate (Interstitial%snowmt (IM)) - allocate (Interstitial%stress (IM)) - allocate (Interstitial%stress_ice (IM)) - allocate (Interstitial%stress_land (IM)) - allocate (Interstitial%stress_water (IM)) - allocate (Interstitial%theta (IM)) - allocate (Interstitial%tkeh (IM,Model%levs+1)) !Vertical turbulent kinetic energy at model layer interfaces - allocate (Interstitial%tlvl (IM,Model%levr+1+LTP)) - allocate (Interstitial%tlyr (IM,Model%levr+LTP)) - allocate (Interstitial%tprcp_ice (IM)) - allocate (Interstitial%tprcp_land (IM)) - allocate (Interstitial%tprcp_water (IM)) - allocate (Interstitial%trans (IM)) - allocate (Interstitial%tseal (IM)) - allocate (Interstitial%tsfa (IM)) - allocate (Interstitial%tsfc_water (IM)) - allocate (Interstitial%tsfg (IM)) - allocate (Interstitial%tsurf_ice (IM)) - allocate (Interstitial%tsurf_land (IM)) - allocate (Interstitial%tsurf_water (IM)) - allocate (Interstitial%ud_mf (IM,Model%levs)) - allocate (Interstitial%uustar_ice (IM)) - allocate (Interstitial%uustar_land (IM)) - allocate (Interstitial%uustar_water (IM)) - allocate 
(Interstitial%vdftra (IM,Model%levs,Interstitial%nvdiff)) !GJF first dimension was set as 'IX' in GFS_physics_driver - allocate (Interstitial%vegf1d (IM)) - allocate (Interstitial%wcbmax (IM)) - allocate (Interstitial%wind (IM)) - allocate (Interstitial%work1 (IM)) - allocate (Interstitial%work2 (IM)) - allocate (Interstitial%work3 (IM)) - allocate (Interstitial%xcosz (IM)) - allocate (Interstitial%xlai1d (IM)) - allocate (Interstitial%xmu (IM)) - allocate (Interstitial%z01d (IM)) - allocate (Interstitial%zt1d (IM)) - allocate (Interstitial%ztmax_ice (IM)) - allocate (Interstitial%ztmax_land (IM)) - allocate (Interstitial%ztmax_water (IM)) + allocate (Interstitial%gabsbdlw (ixs:ixe)) + allocate (Interstitial%gabsbdlw_ice (ixs:ixe)) + allocate (Interstitial%gabsbdlw_land (ixs:ixe)) + allocate (Interstitial%gabsbdlw_water (ixs:ixe)) + allocate (Interstitial%gamma (ixs:ixe)) + allocate (Interstitial%gamq (ixs:ixe)) + allocate (Interstitial%gamt (ixs:ixe)) + allocate (Interstitial%gasvmr (ixs:ixe,Model%levr+LTP,NF_VGAS)) + allocate (Interstitial%gflx (ixs:ixe)) + allocate (Interstitial%gflx_ice (ixs:ixe)) + allocate (Interstitial%gflx_land (ixs:ixe)) + allocate (Interstitial%gflx_water (ixs:ixe)) + allocate (Interstitial%gwdcu (ixs:ixe,Model%levs)) + allocate (Interstitial%gwdcv (ixs:ixe,Model%levs)) + allocate (Interstitial%zvfun (ixs:ixe)) + allocate (Interstitial%hffac (ixs:ixe)) + allocate (Interstitial%hflxq (ixs:ixe)) + allocate (Interstitial%hflx_ice (ixs:ixe)) + allocate (Interstitial%hflx_land (ixs:ixe)) + allocate (Interstitial%hflx_water (ixs:ixe)) + allocate (Interstitial%htlwc (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%htlw0 (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%htswc (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%htsw0 (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%dry (ixs:ixe)) + allocate (Interstitial%idxday (ixs:ixe)) + allocate (Interstitial%icy (ixs:ixe)) + allocate (Interstitial%lake (ixs:ixe)) + allocate 
(Interstitial%ocean (ixs:ixe)) + allocate (Interstitial%islmsk (ixs:ixe)) + allocate (Interstitial%islmsk_cice (ixs:ixe)) + allocate (Interstitial%wet (ixs:ixe)) + allocate (Interstitial%kbot (ixs:ixe)) + allocate (Interstitial%kcnv (ixs:ixe)) + allocate (Interstitial%kinver (ixs:ixe)) + allocate (Interstitial%kpbl (ixs:ixe)) + allocate (Interstitial%ktop (ixs:ixe)) + allocate (Interstitial%mbota (ixs:ixe,3)) + allocate (Interstitial%mtopa (ixs:ixe,3)) + allocate (Interstitial%oa4 (ixs:ixe,4)) + allocate (Interstitial%oc (ixs:ixe)) + allocate (Interstitial%olyr (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%plvl (ixs:ixe,Model%levr+1+LTP)) + allocate (Interstitial%plyr (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%prnum (ixs:ixe,Model%levs)) + allocate (Interstitial%qlyr (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%prcpmp (ixs:ixe)) + allocate (Interstitial%qss_ice (ixs:ixe)) + allocate (Interstitial%qss_land (ixs:ixe)) + allocate (Interstitial%qss_water (ixs:ixe)) + allocate (Interstitial%raincd (ixs:ixe)) + allocate (Interstitial%raincs (ixs:ixe)) + allocate (Interstitial%rainmcadj (ixs:ixe)) + allocate (Interstitial%rainp (ixs:ixe,Model%levs)) + allocate (Interstitial%rb (ixs:ixe)) + allocate (Interstitial%rb_ice (ixs:ixe)) + allocate (Interstitial%rb_land (ixs:ixe)) + allocate (Interstitial%rb_water (ixs:ixe)) + allocate (Interstitial%rhc (ixs:ixe,Model%levs)) + allocate (Interstitial%runoff (ixs:ixe)) + allocate (Interstitial%save_q (ixs:ixe,Model%levs,Model%ntrac)) + allocate (Interstitial%save_t (ixs:ixe,Model%levs)) + allocate (Interstitial%save_tcp (ixs:ixe,Model%levs)) + allocate (Interstitial%save_u (ixs:ixe,Model%levs)) + allocate (Interstitial%save_v (ixs:ixe,Model%levs)) + allocate (Interstitial%sbsno (ixs:ixe)) + allocate (Interstitial%scmpsw (ixs:ixe)) + allocate (Interstitial%sfcalb (ixs:ixe,NF_ALBD)) + allocate (Interstitial%sigma (ixs:ixe)) + allocate (Interstitial%sigmaf (ixs:ixe)) + allocate (Interstitial%sigmafrac 
(ixs:ixe,Model%levs)) + allocate (Interstitial%sigmatot (ixs:ixe,Model%levs+1)) + allocate (Interstitial%snowc (ixs:ixe)) + allocate (Interstitial%snohf (ixs:ixe)) + allocate (Interstitial%snowmt (ixs:ixe)) + allocate (Interstitial%stress (ixs:ixe)) + allocate (Interstitial%stress_ice (ixs:ixe)) + allocate (Interstitial%stress_land (ixs:ixe)) + allocate (Interstitial%stress_water (ixs:ixe)) + allocate (Interstitial%theta (ixs:ixe)) + allocate (Interstitial%tkeh (ixs:ixe,Model%levs+1)) !Vertical turbulent kinetic energy at model layer interfaces + allocate (Interstitial%tlvl (ixs:ixe,Model%levr+1+LTP)) + allocate (Interstitial%tlyr (ixs:ixe,Model%levr+LTP)) + allocate (Interstitial%tprcp_ice (ixs:ixe)) + allocate (Interstitial%tprcp_land (ixs:ixe)) + allocate (Interstitial%tprcp_water (ixs:ixe)) + allocate (Interstitial%trans (ixs:ixe)) + allocate (Interstitial%tseal (ixs:ixe)) + allocate (Interstitial%tsfa (ixs:ixe)) + allocate (Interstitial%tsfc_water (ixs:ixe)) + allocate (Interstitial%tsfg (ixs:ixe)) + allocate (Interstitial%tsurf_ice (ixs:ixe)) + allocate (Interstitial%tsurf_land (ixs:ixe)) + allocate (Interstitial%tsurf_water (ixs:ixe)) + allocate (Interstitial%ud_mf (ixs:ixe,Model%levs)) + allocate (Interstitial%uustar_ice (ixs:ixe)) + allocate (Interstitial%uustar_land (ixs:ixe)) + allocate (Interstitial%uustar_water (ixs:ixe)) + allocate (Interstitial%vdftra (ixs:ixe,Model%levs,Interstitial%nvdiff)) !GJF first dimension was set as 'IX' in GFS_physics_driver + allocate (Interstitial%vegf1d (ixs:ixe)) + allocate (Interstitial%wcbmax (ixs:ixe)) + allocate (Interstitial%wind (ixs:ixe)) + allocate (Interstitial%work1 (ixs:ixe)) + allocate (Interstitial%work2 (ixs:ixe)) + allocate (Interstitial%work3 (ixs:ixe)) + allocate (Interstitial%xcosz (ixs:ixe)) + allocate (Interstitial%xlai1d (ixs:ixe)) + allocate (Interstitial%xmu (ixs:ixe)) + allocate (Interstitial%z01d (ixs:ixe)) + allocate (Interstitial%zt1d (ixs:ixe)) + allocate (Interstitial%ztmax_ice (ixs:ixe)) + 
allocate (Interstitial%ztmax_land (ixs:ixe)) + allocate (Interstitial%ztmax_water (ixs:ixe)) ! RRTMGP if (Model%do_RRTMGP) then - allocate (Interstitial%tracer (IM, Model%levs,Model%ntrac)) - allocate (Interstitial%tv_lay (IM, Model%levs)) - allocate (Interstitial%relhum (IM, Model%levs)) - allocate (Interstitial%qs_lay (IM, Model%levs)) - allocate (Interstitial%q_lay (IM, Model%levs)) - allocate (Interstitial%deltaZ (IM, Model%levs)) - allocate (Interstitial%deltaZc (IM, Model%levs)) - allocate (Interstitial%deltaP (IM, Model%levs)) - allocate (Interstitial%p_lev (IM, Model%levs+1)) - allocate (Interstitial%p_lay (IM, Model%levs)) - allocate (Interstitial%t_lev (IM, Model%levs+1)) - allocate (Interstitial%t_lay (IM, Model%levs)) - allocate (Interstitial%cloud_overlap_param (IM, Model%levs)) - allocate (Interstitial%precip_overlap_param (IM, Model%levs)) - allocate (Interstitial%fluxlwUP_allsky (IM, Model%levs+1)) - allocate (Interstitial%fluxlwDOWN_allsky (IM, Model%levs+1)) - allocate (Interstitial%fluxlwUP_clrsky (IM, Model%levs+1)) - allocate (Interstitial%fluxlwDOWN_clrsky (IM, Model%levs+1)) - allocate (Interstitial%fluxswUP_allsky (IM, Model%levs+1)) - allocate (Interstitial%fluxswDOWN_allsky (IM, Model%levs+1)) - allocate (Interstitial%fluxswUP_clrsky (IM, Model%levs+1)) - allocate (Interstitial%fluxswDOWN_clrsky (IM, Model%levs+1)) - allocate (Interstitial%aerosolslw (IM, Model%levs, Model%rrtmgp_nBandsLW, NF_AELW)) - allocate (Interstitial%aerosolssw (IM, Model%levs, Model%rrtmgp_nBandsSW, NF_AESW)) - allocate (Interstitial%precip_frac (IM, Model%levs)) - allocate (Interstitial%cld_cnv_frac (IM, Model%levs)) - allocate (Interstitial%cnv_cloud_overlap_param(IM, Model%levs)) - allocate (Interstitial%cld_cnv_lwp (IM, Model%levs)) - allocate (Interstitial%cld_cnv_reliq (IM, Model%levs)) - allocate (Interstitial%cld_cnv_iwp (IM, Model%levs)) - allocate (Interstitial%cld_cnv_reice (IM, Model%levs)) - allocate (Interstitial%cld_pbl_lwp (IM, Model%levs)) - 
allocate (Interstitial%cld_pbl_reliq (IM, Model%levs)) - allocate (Interstitial%cld_pbl_iwp (IM, Model%levs)) - allocate (Interstitial%cld_pbl_reice (IM, Model%levs)) - allocate (Interstitial%flxprf_lw (IM, Model%levs+1)) - allocate (Interstitial%flxprf_sw (IM, Model%levs+1)) - allocate (Interstitial%sfc_emiss_byband (Model%rrtmgp_nBandsLW,IM)) - allocate (Interstitial%sec_diff_byband (Model%rrtmgp_nBandsLW,IM)) - allocate (Interstitial%sfc_alb_nir_dir (Model%rrtmgp_nBandsSW,IM)) - allocate (Interstitial%sfc_alb_nir_dif (Model%rrtmgp_nBandsSW,IM)) - allocate (Interstitial%sfc_alb_uvvis_dir (Model%rrtmgp_nBandsSW,IM)) - allocate (Interstitial%sfc_alb_uvvis_dif (Model%rrtmgp_nBandsSW,IM)) - allocate (Interstitial%toa_src_sw (IM,Model%rrtmgp_nGptsSW)) - allocate (Interstitial%toa_src_lw (IM,Model%rrtmgp_nGptsLW)) - allocate (Interstitial%vmr_o2 (IM, Model%levs)) - allocate (Interstitial%vmr_h2o (IM, Model%levs)) - allocate (Interstitial%vmr_o3 (IM, Model%levs)) - allocate (Interstitial%vmr_ch4 (IM, Model%levs)) - allocate (Interstitial%vmr_n2o (IM, Model%levs)) - allocate (Interstitial%vmr_co2 (IM, Model%levs)) + allocate (Interstitial%tracer (ixs:ixe, Model%levs,Model%ntrac)) + allocate (Interstitial%tv_lay (ixs:ixe, Model%levs)) + allocate (Interstitial%relhum (ixs:ixe, Model%levs)) + allocate (Interstitial%qs_lay (ixs:ixe, Model%levs)) + allocate (Interstitial%q_lay (ixs:ixe, Model%levs)) + allocate (Interstitial%deltaZ (ixs:ixe, Model%levs)) + allocate (Interstitial%deltaZc (ixs:ixe, Model%levs)) + allocate (Interstitial%deltaP (ixs:ixe, Model%levs)) + allocate (Interstitial%p_lev (ixs:ixe, Model%levs+1)) + allocate (Interstitial%p_lay (ixs:ixe, Model%levs)) + allocate (Interstitial%t_lev (ixs:ixe, Model%levs+1)) + allocate (Interstitial%t_lay (ixs:ixe, Model%levs)) + allocate (Interstitial%cloud_overlap_param (ixs:ixe, Model%levs)) + allocate (Interstitial%precip_overlap_param (ixs:ixe, Model%levs)) + allocate (Interstitial%fluxlwUP_allsky (ixs:ixe, 
Model%levs+1)) + allocate (Interstitial%fluxlwDOWN_allsky (ixs:ixe, Model%levs+1)) + allocate (Interstitial%fluxlwUP_clrsky (ixs:ixe, Model%levs+1)) + allocate (Interstitial%fluxlwDOWN_clrsky (ixs:ixe, Model%levs+1)) + allocate (Interstitial%fluxswUP_allsky (ixs:ixe, Model%levs+1)) + allocate (Interstitial%fluxswDOWN_allsky (ixs:ixe, Model%levs+1)) + allocate (Interstitial%fluxswUP_clrsky (ixs:ixe, Model%levs+1)) + allocate (Interstitial%fluxswDOWN_clrsky (ixs:ixe, Model%levs+1)) + allocate (Interstitial%aerosolslw (ixs:ixe, Model%levs, Model%rrtmgp_nBandsLW, NF_AELW)) + allocate (Interstitial%aerosolssw (ixs:ixe, Model%levs, Model%rrtmgp_nBandsSW, NF_AESW)) + allocate (Interstitial%precip_frac (ixs:ixe, Model%levs)) + allocate (Interstitial%cld_cnv_frac (ixs:ixe, Model%levs)) + allocate (Interstitial%cnv_cloud_overlap_param(ixs:ixe, Model%levs)) + allocate (Interstitial%cld_cnv_lwp (ixs:ixe, Model%levs)) + allocate (Interstitial%cld_cnv_reliq (ixs:ixe, Model%levs)) + allocate (Interstitial%cld_cnv_iwp (ixs:ixe, Model%levs)) + allocate (Interstitial%cld_cnv_reice (ixs:ixe, Model%levs)) + allocate (Interstitial%cld_pbl_lwp (ixs:ixe, Model%levs)) + allocate (Interstitial%cld_pbl_reliq (ixs:ixe, Model%levs)) + allocate (Interstitial%cld_pbl_iwp (ixs:ixe, Model%levs)) + allocate (Interstitial%cld_pbl_reice (ixs:ixe, Model%levs)) + allocate (Interstitial%flxprf_lw (ixs:ixe, Model%levs+1)) + allocate (Interstitial%flxprf_sw (ixs:ixe, Model%levs+1)) + allocate (Interstitial%sfc_emiss_byband (Model%rrtmgp_nBandsLW,ixs:ixe)) + allocate (Interstitial%sec_diff_byband (Model%rrtmgp_nBandsLW,ixs:ixe)) + allocate (Interstitial%sfc_alb_nir_dir (Model%rrtmgp_nBandsSW,ixs:ixe)) + allocate (Interstitial%sfc_alb_nir_dif (Model%rrtmgp_nBandsSW,ixs:ixe)) + allocate (Interstitial%sfc_alb_uvvis_dir (Model%rrtmgp_nBandsSW,ixs:ixe)) + allocate (Interstitial%sfc_alb_uvvis_dif (Model%rrtmgp_nBandsSW,ixs:ixe)) + allocate (Interstitial%toa_src_sw (ixs:ixe,Model%rrtmgp_nGptsSW)) + allocate 
(Interstitial%toa_src_lw (ixs:ixe,Model%rrtmgp_nGptsLW)) + allocate (Interstitial%vmr_o2 (ixs:ixe, Model%levs)) + allocate (Interstitial%vmr_h2o (ixs:ixe, Model%levs)) + allocate (Interstitial%vmr_o3 (ixs:ixe, Model%levs)) + allocate (Interstitial%vmr_ch4 (ixs:ixe, Model%levs)) + allocate (Interstitial%vmr_n2o (ixs:ixe, Model%levs)) + allocate (Interstitial%vmr_co2 (ixs:ixe, Model%levs)) end if ! UGWP common - allocate (Interstitial%tau_mtb (IM)) - allocate (Interstitial%tau_ogw (IM)) - allocate (Interstitial%tau_tofd (IM)) - allocate (Interstitial%tau_ngw (IM)) - allocate (Interstitial%tau_oss (IM)) - allocate (Interstitial%dudt_mtb (IM,Model%levs)) - allocate (Interstitial%dudt_tms (IM,Model%levs)) - allocate (Interstitial%zmtb (IM) ) - allocate (Interstitial%zlwb (IM) ) - allocate (Interstitial%zogw (IM) ) - allocate (Interstitial%zngw (IM) ) + allocate (Interstitial%tau_mtb (ixs:ixe)) + allocate (Interstitial%tau_ogw (ixs:ixe)) + allocate (Interstitial%tau_tofd (ixs:ixe)) + allocate (Interstitial%tau_ngw (ixs:ixe)) + allocate (Interstitial%tau_oss (ixs:ixe)) + allocate (Interstitial%dudt_mtb (ixs:ixe,Model%levs)) + allocate (Interstitial%dudt_tms (ixs:ixe,Model%levs)) + allocate (Interstitial%zmtb (ixs:ixe) ) + allocate (Interstitial%zlwb (ixs:ixe) ) + allocate (Interstitial%zogw (ixs:ixe) ) + allocate (Interstitial%zngw (ixs:ixe) ) ! CIRES UGWP v1 if (Model%ldiag_ugwp .or. Model%do_ugwp_v0 .or. Model%do_ugwp_v0_nst_only & .or. Model%do_ugwp_v1) then - allocate (Interstitial%dudt_ngw (IM,Model%levs)) - allocate (Interstitial%dvdt_ngw (IM,Model%levs)) - allocate (Interstitial%dtdt_ngw (IM,Model%levs)) - allocate (Interstitial%kdis_ngw (IM,Model%levs)) + allocate (Interstitial%dudt_ngw (ixs:ixe,Model%levs)) + allocate (Interstitial%dvdt_ngw (ixs:ixe,Model%levs)) + allocate (Interstitial%dtdt_ngw (ixs:ixe,Model%levs)) + allocate (Interstitial%kdis_ngw (ixs:ixe,Model%levs)) end if !-- GSL drag suite if (Model%gwd_opt==3 .or. Model%gwd_opt==33 .or. 
& Model%gwd_opt==2 .or. Model%gwd_opt==22 ) then - allocate (Interstitial%varss (IM)) - allocate (Interstitial%ocss (IM)) - allocate (Interstitial%oa4ss (IM,4)) - allocate (Interstitial%clxss (IM,4)) + allocate (Interstitial%varss (ixs:ixe)) + allocate (Interstitial%ocss (ixs:ixe)) + allocate (Interstitial%oa4ss (ixs:ixe,4)) + allocate (Interstitial%clxss (ixs:ixe,4)) end if ! ! Allocate arrays that are conditional on physics choices if (Model%imp_physics == Model%imp_physics_gfdl .or. Model%imp_physics == Model%imp_physics_thompson & .or. Model%imp_physics == Model%imp_physics_tempo .or. Model%imp_physics == Model%imp_physics_nssl & ) then - allocate (Interstitial%graupelmp (IM)) - allocate (Interstitial%icemp (IM)) - allocate (Interstitial%rainmp (IM)) - allocate (Interstitial%snowmp (IM)) + allocate (Interstitial%graupelmp (ixs:ixe)) + allocate (Interstitial%icemp (ixs:ixe)) + allocate (Interstitial%rainmp (ixs:ixe)) + allocate (Interstitial%snowmp (ixs:ixe)) else if (Model%imp_physics == Model%imp_physics_mg) then - allocate (Interstitial%ncgl (IM,Model%levs)) - allocate (Interstitial%ncpr (IM,Model%levs)) - allocate (Interstitial%ncps (IM,Model%levs)) - allocate (Interstitial%qgl (IM,Model%levs)) - allocate (Interstitial%qrn (IM,Model%levs)) - allocate (Interstitial%qsnw (IM,Model%levs)) - allocate (Interstitial%qlcn (IM,Model%levs)) - allocate (Interstitial%qicn (IM,Model%levs)) - allocate (Interstitial%w_upi (IM,Model%levs)) - allocate (Interstitial%cf_upi (IM,Model%levs)) - allocate (Interstitial%cnv_mfd (IM,Model%levs)) - allocate (Interstitial%cnv_dqldt (IM,Model%levs)) - allocate (Interstitial%clcn (IM,Model%levs)) - allocate (Interstitial%cnv_fice (IM,Model%levs)) - allocate (Interstitial%cnv_ndrop (IM,Model%levs)) - allocate (Interstitial%cnv_nice (IM,Model%levs)) + allocate (Interstitial%ncgl (ixs:ixe,Model%levs)) + allocate (Interstitial%ncpr (ixs:ixe,Model%levs)) + allocate (Interstitial%ncps (ixs:ixe,Model%levs)) + allocate (Interstitial%qgl 
(ixs:ixe,Model%levs)) + allocate (Interstitial%qrn (ixs:ixe,Model%levs)) + allocate (Interstitial%qsnw (ixs:ixe,Model%levs)) + allocate (Interstitial%qlcn (ixs:ixe,Model%levs)) + allocate (Interstitial%qicn (ixs:ixe,Model%levs)) + allocate (Interstitial%w_upi (ixs:ixe,Model%levs)) + allocate (Interstitial%cf_upi (ixs:ixe,Model%levs)) + allocate (Interstitial%cnv_mfd (ixs:ixe,Model%levs)) + allocate (Interstitial%cnv_dqldt (ixs:ixe,Model%levs)) + allocate (Interstitial%clcn (ixs:ixe,Model%levs)) + allocate (Interstitial%cnv_fice (ixs:ixe,Model%levs)) + allocate (Interstitial%cnv_ndrop (ixs:ixe,Model%levs)) + allocate (Interstitial%cnv_nice (ixs:ixe,Model%levs)) end if if (Model%lsm == Model%lsm_noahmp) then - allocate (Interstitial%t2mmp (IM)) - allocate (Interstitial%q2mp (IM)) + allocate (Interstitial%t2mmp (ixs:ixe)) + allocate (Interstitial%q2mp (ixs:ixe)) end if ! ! Set components that do not change Interstitial%frain = Model%dtf/Model%dtp - Interstitial%ipr = min(IM,10) + Interstitial%ipr = min(ixe-ixs+1,10) Interstitial%latidxprnt = 1 Interstitial%levi = Model%levs+1 Interstitial%lmk = Model%levr+LTP @@ -875,11 +873,354 @@ subroutine gfs_interstitial_create (Interstitial, IM, Model) Interstitial%phys_hydrostatic = .true. ! ! Reset all other variables - call Interstitial%rad_reset (Model) - call Interstitial%phys_reset (Model) + call Interstitial%reset (Model) ! end subroutine gfs_interstitial_create + subroutine gfs_interstitial_destroy (Interstitial, Model) + ! + implicit none + ! + class(GFS_interstitial_type) :: Interstitial + type(GFS_control_type), intent(in) :: Model + + deallocate (Interstitial%otspt) + deallocate (Interstitial%otsptflag) + ! 
Allocate arrays + deallocate (Interstitial%adjsfculw_land) + deallocate (Interstitial%adjsfculw_ice) + deallocate (Interstitial%adjsfculw_water) + deallocate (Interstitial%adjnirbmd) + deallocate (Interstitial%adjnirbmu) + deallocate (Interstitial%adjnirdfd) + deallocate (Interstitial%adjnirdfu) + deallocate (Interstitial%adjvisbmd) + deallocate (Interstitial%adjvisbmu) + deallocate (Interstitial%adjvisdfu) + deallocate (Interstitial%adjvisdfd) + deallocate (Interstitial%aerodp) + deallocate (Interstitial%alb1d) + if (.not. Model%do_RRTMGP) then + deallocate (Interstitial%alpha) + end if + deallocate (Interstitial%bexp1d) + deallocate (Interstitial%cd) + deallocate (Interstitial%cd_ice) + deallocate (Interstitial%cd_land) + deallocate (Interstitial%cd_water) + deallocate (Interstitial%cdq) + deallocate (Interstitial%cdq_ice) + deallocate (Interstitial%cdq_land) + deallocate (Interstitial%cdq_water) + deallocate (Interstitial%chh_ice) + deallocate (Interstitial%chh_land) + deallocate (Interstitial%chh_water) + deallocate (Interstitial%cldf) + deallocate (Interstitial%cldsa) + deallocate (Interstitial%cldtaulw) + deallocate (Interstitial%cldtausw) + deallocate (Interstitial%cld1d) + deallocate (Interstitial%clouds) + deallocate (Interstitial%clw) + deallocate (Interstitial%clx) + deallocate (Interstitial%cmm_ice) + deallocate (Interstitial%cmm_land) + deallocate (Interstitial%cmm_water) + deallocate (Interstitial%cnvc) + deallocate (Interstitial%ctei_r) + deallocate (Interstitial%ctei_rml) + deallocate (Interstitial%cumabs) + deallocate (Interstitial%dd_mf) + deallocate (Interstitial%de_lgth) + deallocate (Interstitial%del) + deallocate (Interstitial%del_gz) + deallocate (Interstitial%delr) + deallocate (Interstitial%dlength) + deallocate (Interstitial%dqdt) + deallocate (Interstitial%dqsfc1) + deallocate (Interstitial%drain) + deallocate (Interstitial%dtdt) + deallocate (Interstitial%dtsfc1) + deallocate (Interstitial%dt_mf) + deallocate (Interstitial%dtzm) + 
deallocate (Interstitial%dudt) + deallocate (Interstitial%dusfcg) + deallocate (Interstitial%dusfc1) + deallocate (Interstitial%dvdt) + deallocate (Interstitial%dvsfcg) + deallocate (Interstitial%dvsfc1) + deallocate (Interstitial%dvdftra) + deallocate (Interstitial%dzlyr) + deallocate (Interstitial%elvmax) + deallocate (Interstitial%ep1d) + deallocate (Interstitial%ep1d_ice) + deallocate (Interstitial%ep1d_land) + deallocate (Interstitial%ep1d_water) + deallocate (Interstitial%evap_ice) + deallocate (Interstitial%evap_land) + deallocate (Interstitial%evap_water) + deallocate (Interstitial%evbs) + deallocate (Interstitial%evcw) + deallocate (Interstitial%pah) + deallocate (Interstitial%ecan) + deallocate (Interstitial%etran) + deallocate (Interstitial%edir) + deallocate (Interstitial%faerlw) + deallocate (Interstitial%faersw) + deallocate (Interstitial%ffhh_ice) + deallocate (Interstitial%ffhh_land) + deallocate (Interstitial%ffhh_water) + deallocate (Interstitial%fh2) + deallocate (Interstitial%fh2_ice) + deallocate (Interstitial%fh2_land) + deallocate (Interstitial%fh2_water) + deallocate (Interstitial%flag_cice) + deallocate (Interstitial%flag_guess) + deallocate (Interstitial%flag_iter) + deallocate (Interstitial%flag_lakefreeze) + deallocate (Interstitial%ffmm_ice) + deallocate (Interstitial%ffmm_land) + deallocate (Interstitial%ffmm_water) + deallocate (Interstitial%fm10) + deallocate (Interstitial%fm10_ice) + deallocate (Interstitial%fm10_land) + deallocate (Interstitial%fm10_water) + deallocate (Interstitial%frland) + deallocate (Interstitial%fscav) + deallocate (Interstitial%fswtr) + deallocate (Interstitial%gabsbdlw) + deallocate (Interstitial%gabsbdlw_ice) + deallocate (Interstitial%gabsbdlw_land) + deallocate (Interstitial%gabsbdlw_water) + deallocate (Interstitial%gamma) + deallocate (Interstitial%gamq) + deallocate (Interstitial%gamt) + deallocate (Interstitial%gasvmr) + deallocate (Interstitial%gflx) + deallocate (Interstitial%gflx_ice) + deallocate 
(Interstitial%gflx_land) + deallocate (Interstitial%gflx_water) + deallocate (Interstitial%gwdcu) + deallocate (Interstitial%gwdcv) + deallocate (Interstitial%zvfun) + deallocate (Interstitial%hffac) + deallocate (Interstitial%hflxq) + deallocate (Interstitial%hflx_ice) + deallocate (Interstitial%hflx_land) + deallocate (Interstitial%hflx_water) + deallocate (Interstitial%htlwc) + deallocate (Interstitial%htlw0) + deallocate (Interstitial%htswc) + deallocate (Interstitial%htsw0) + deallocate (Interstitial%dry) + deallocate (Interstitial%idxday) + deallocate (Interstitial%icy) + deallocate (Interstitial%lake) + deallocate (Interstitial%ocean) + deallocate (Interstitial%islmsk) + deallocate (Interstitial%islmsk_cice) + deallocate (Interstitial%wet) + deallocate (Interstitial%kbot) + deallocate (Interstitial%kcnv) + deallocate (Interstitial%kinver) + deallocate (Interstitial%kpbl) + deallocate (Interstitial%ktop) + deallocate (Interstitial%mbota) + deallocate (Interstitial%mtopa) + deallocate (Interstitial%oa4) + deallocate (Interstitial%oc) + deallocate (Interstitial%olyr) + deallocate (Interstitial%plvl) + deallocate (Interstitial%plyr) + deallocate (Interstitial%prnum) + deallocate (Interstitial%qlyr) + deallocate (Interstitial%prcpmp) + deallocate (Interstitial%qss_ice) + deallocate (Interstitial%qss_land) + deallocate (Interstitial%qss_water) + deallocate (Interstitial%raincd) + deallocate (Interstitial%raincs) + deallocate (Interstitial%rainmcadj) + deallocate (Interstitial%rainp) + deallocate (Interstitial%rb) + deallocate (Interstitial%rb_ice) + deallocate (Interstitial%rb_land) + deallocate (Interstitial%rb_water) + deallocate (Interstitial%rhc) + deallocate (Interstitial%runoff) + deallocate (Interstitial%save_q) + deallocate (Interstitial%save_t) + deallocate (Interstitial%save_tcp) + deallocate (Interstitial%save_u) + deallocate (Interstitial%save_v) + deallocate (Interstitial%sbsno) + deallocate (Interstitial%scmpsw) + deallocate (Interstitial%sfcalb) + 
deallocate (Interstitial%sigma) + deallocate (Interstitial%sigmaf) + deallocate (Interstitial%sigmafrac) + deallocate (Interstitial%sigmatot) + deallocate (Interstitial%snowc) + deallocate (Interstitial%snohf) + deallocate (Interstitial%snowmt) + deallocate (Interstitial%stress) + deallocate (Interstitial%stress_ice) + deallocate (Interstitial%stress_land) + deallocate (Interstitial%stress_water) + deallocate (Interstitial%theta) + deallocate (Interstitial%tkeh) + deallocate (Interstitial%tlvl) + deallocate (Interstitial%tlyr) + deallocate (Interstitial%tprcp_ice) + deallocate (Interstitial%tprcp_land) + deallocate (Interstitial%tprcp_water) + deallocate (Interstitial%trans) + deallocate (Interstitial%tseal) + deallocate (Interstitial%tsfa) + deallocate (Interstitial%tsfc_water) + deallocate (Interstitial%tsfg) + deallocate (Interstitial%tsurf_ice) + deallocate (Interstitial%tsurf_land) + deallocate (Interstitial%tsurf_water) + deallocate (Interstitial%ud_mf) + deallocate (Interstitial%uustar_ice) + deallocate (Interstitial%uustar_land) + deallocate (Interstitial%uustar_water) + deallocate (Interstitial%vdftra) + deallocate (Interstitial%vegf1d) + deallocate (Interstitial%wcbmax) + deallocate (Interstitial%wind) + deallocate (Interstitial%work1) + deallocate (Interstitial%work2) + deallocate (Interstitial%work3) + deallocate (Interstitial%xcosz) + deallocate (Interstitial%xlai1d) + deallocate (Interstitial%xmu) + deallocate (Interstitial%z01d) + deallocate (Interstitial%zt1d) + deallocate (Interstitial%ztmax_ice) + deallocate (Interstitial%ztmax_land) + deallocate (Interstitial%ztmax_water) + + ! 
RRTMGP + if (Model%do_RRTMGP) then + deallocate (Interstitial%tracer) + deallocate (Interstitial%tv_lay) + deallocate (Interstitial%relhum) + deallocate (Interstitial%qs_lay) + deallocate (Interstitial%q_lay) + deallocate (Interstitial%deltaZ) + deallocate (Interstitial%deltaZc) + deallocate (Interstitial%deltaP) + deallocate (Interstitial%p_lev) + deallocate (Interstitial%p_lay) + deallocate (Interstitial%t_lev) + deallocate (Interstitial%t_lay) + deallocate (Interstitial%cloud_overlap_param) + deallocate (Interstitial%precip_overlap_param) + deallocate (Interstitial%fluxlwUP_allsky) + deallocate (Interstitial%fluxlwDOWN_allsky) + deallocate (Interstitial%fluxlwUP_clrsky) + deallocate (Interstitial%fluxlwDOWN_clrsky) + deallocate (Interstitial%fluxswUP_allsky) + deallocate (Interstitial%fluxswDOWN_allsky) + deallocate (Interstitial%fluxswUP_clrsky) + deallocate (Interstitial%fluxswDOWN_clrsky) + deallocate (Interstitial%aerosolslw) + deallocate (Interstitial%aerosolssw) + deallocate (Interstitial%precip_frac) + deallocate (Interstitial%cld_cnv_frac) + deallocate (Interstitial%cnv_cloud_overlap_param) + deallocate (Interstitial%cld_cnv_lwp) + deallocate (Interstitial%cld_cnv_reliq) + deallocate (Interstitial%cld_cnv_iwp) + deallocate (Interstitial%cld_cnv_reice) + deallocate (Interstitial%cld_pbl_lwp) + deallocate (Interstitial%cld_pbl_reliq) + deallocate (Interstitial%cld_pbl_iwp) + deallocate (Interstitial%cld_pbl_reice) + deallocate (Interstitial%flxprf_lw) + deallocate (Interstitial%flxprf_sw) + deallocate (Interstitial%sfc_emiss_byband) + deallocate (Interstitial%sec_diff_byband) + deallocate (Interstitial%sfc_alb_nir_dir) + deallocate (Interstitial%sfc_alb_nir_dif) + deallocate (Interstitial%sfc_alb_uvvis_dir) + deallocate (Interstitial%sfc_alb_uvvis_dif) + deallocate (Interstitial%toa_src_sw) + deallocate (Interstitial%toa_src_lw) + deallocate (Interstitial%vmr_o2) + deallocate (Interstitial%vmr_h2o) + deallocate (Interstitial%vmr_o3) + deallocate 
(Interstitial%vmr_ch4) + deallocate (Interstitial%vmr_n2o) + deallocate (Interstitial%vmr_co2) + end if + + ! UGWP common + deallocate (Interstitial%tau_mtb) + deallocate (Interstitial%tau_ogw) + deallocate (Interstitial%tau_tofd) + deallocate (Interstitial%tau_ngw) + deallocate (Interstitial%tau_oss) + deallocate (Interstitial%dudt_mtb) + deallocate (Interstitial%dudt_tms) + deallocate (Interstitial%zmtb) + deallocate (Interstitial%zlwb) + deallocate (Interstitial%zogw) + deallocate (Interstitial%zngw) + + ! CIRES UGWP v1 + if (Model%ldiag_ugwp .or. Model%do_ugwp_v0 .or. Model%do_ugwp_v0_nst_only & + .or. Model%do_ugwp_v1) then + deallocate (Interstitial%dudt_ngw) + deallocate (Interstitial%dvdt_ngw) + deallocate (Interstitial%dtdt_ngw) + deallocate (Interstitial%kdis_ngw) + end if + + !-- GSL drag suite + if (Model%gwd_opt==3 .or. Model%gwd_opt==33 .or. & + Model%gwd_opt==2 .or. Model%gwd_opt==22 ) then + deallocate (Interstitial%varss) + deallocate (Interstitial%ocss) + deallocate (Interstitial%oa4ss) + deallocate (Interstitial%clxss) + end if + + ! Allocate arrays that are conditional on physics choices + if (Model%imp_physics == Model%imp_physics_gfdl .or. Model%imp_physics == Model%imp_physics_thompson & + .or. Model%imp_physics == Model%imp_physics_tempo .or. 
Model%imp_physics == Model%imp_physics_nssl & + ) then + deallocate (Interstitial%graupelmp) + deallocate (Interstitial%icemp) + deallocate (Interstitial%rainmp) + deallocate (Interstitial%snowmp) + else if (Model%imp_physics == Model%imp_physics_mg) then + deallocate (Interstitial%ncgl) + deallocate (Interstitial%ncpr) + deallocate (Interstitial%ncps) + deallocate (Interstitial%qgl) + deallocate (Interstitial%qrn) + deallocate (Interstitial%qsnw) + deallocate (Interstitial%qlcn) + deallocate (Interstitial%qicn) + deallocate (Interstitial%w_upi) + deallocate (Interstitial%cf_upi) + deallocate (Interstitial%cnv_mfd) + deallocate (Interstitial%cnv_dqldt) + deallocate (Interstitial%clcn) + deallocate (Interstitial%cnv_fice) + deallocate (Interstitial%cnv_ndrop) + deallocate (Interstitial%cnv_nice) + end if + if (Model%lsm == Model%lsm_noahmp) then + deallocate (Interstitial%t2mmp) + deallocate (Interstitial%q2mp) + end if + + end subroutine gfs_interstitial_destroy + subroutine gfs_interstitial_setup_tracers(Interstitial, Model) ! implicit none @@ -1084,121 +1425,7 @@ subroutine gfs_interstitial_setup_tracers(Interstitial, Model) end subroutine gfs_interstitial_setup_tracers - subroutine gfs_interstitial_rad_reset (Interstitial, Model) - ! - implicit none - ! - class(GFS_interstitial_type) :: Interstitial - type(GFS_control_type), intent(in) :: Model - integer :: iGas - ! - Interstitial%aerodp = clear_val - Interstitial%alb1d = clear_val - if (.not. 
Model%do_RRTMGP) then - Interstitial%alpha = clear_val - end if - Interstitial%cldsa = clear_val - Interstitial%cldtaulw = clear_val - Interstitial%cldtausw = clear_val - Interstitial%clouds = clear_val - Interstitial%de_lgth = clear_val - Interstitial%delr = clear_val - Interstitial%dzlyr = clear_val - Interstitial%faerlw = clear_val - Interstitial%faersw = clear_val - Interstitial%gasvmr = clear_val - Interstitial%htlwc = clear_val - Interstitial%htlw0 = clear_val - Interstitial%htswc = clear_val - Interstitial%htsw0 = clear_val - Interstitial%idxday = 0 - Interstitial%kb = 0 - Interstitial%kd = 0 - Interstitial%kt = 0 - Interstitial%mbota = 0 - Interstitial%mtopa = 0 - Interstitial%nday = 0 - Interstitial%olyr = clear_val - Interstitial%plvl = clear_val - Interstitial%plyr = clear_val - Interstitial%qlyr = clear_val - Interstitial%raddt = clear_val - Interstitial%sfcalb = clear_val - Interstitial%tlvl = clear_val - Interstitial%tlyr = clear_val - Interstitial%tsfa = clear_val - Interstitial%tsfg = clear_val - - ! 
Interstitials used by both RRTMG and RRTMGP - Interstitial%scmpsw%uvbfc = clear_val - Interstitial%scmpsw%uvbf0 = clear_val - Interstitial%scmpsw%nirbm = clear_val - Interstitial%scmpsw%nirdf = clear_val - Interstitial%scmpsw%visbm = clear_val - Interstitial%scmpsw%visdf = clear_val - if (Model%do_RRTMGP) then - Interstitial%tracer = clear_val - Interstitial%tv_lay = clear_val - Interstitial%relhum = clear_val - Interstitial%qs_lay = clear_val - Interstitial%q_lay = clear_val - Interstitial%deltaZ = clear_val - Interstitial%deltaZc = clear_val - Interstitial%deltaP = clear_val - Interstitial%p_lev = clear_val - Interstitial%p_lay = clear_val - Interstitial%t_lev = clear_val - Interstitial%t_lay = clear_val - Interstitial%cloud_overlap_param = clear_val - Interstitial%precip_overlap_param = clear_val - Interstitial%fluxlwUP_allsky = clear_val - Interstitial%fluxlwDOWN_allsky = clear_val - Interstitial%fluxlwUP_clrsky = clear_val - Interstitial%fluxlwDOWN_clrsky = clear_val - Interstitial%fluxswUP_allsky = clear_val - Interstitial%fluxswDOWN_allsky = clear_val - Interstitial%fluxswUP_clrsky = clear_val - Interstitial%fluxswDOWN_clrsky = clear_val - Interstitial%aerosolslw = clear_val - Interstitial%aerosolssw = clear_val - Interstitial%precip_frac = clear_val - Interstitial%cld_cnv_frac = clear_val - Interstitial%cnv_cloud_overlap_param = clear_val - Interstitial%cld_cnv_lwp = clear_val - Interstitial%cld_cnv_reliq = clear_val - Interstitial%cld_cnv_iwp = clear_val - Interstitial%cld_cnv_reice = clear_val - Interstitial%cld_pbl_lwp = clear_val - Interstitial%cld_pbl_reliq = clear_val - Interstitial%cld_pbl_iwp = clear_val - Interstitial%cld_pbl_reice = clear_val - Interstitial%sfc_emiss_byband = clear_val - Interstitial%sec_diff_byband = clear_val - Interstitial%sfc_alb_nir_dir = clear_val - Interstitial%sfc_alb_nir_dif = clear_val - Interstitial%sfc_alb_uvvis_dir = clear_val - Interstitial%sfc_alb_uvvis_dif = clear_val - Interstitial%toa_src_sw = clear_val - 
Interstitial%toa_src_lw = clear_val - Interstitial%vmr_o2 = clear_val - Interstitial%vmr_h2o = clear_val - Interstitial%vmr_o3 = clear_val - Interstitial%vmr_ch4 = clear_val - Interstitial%vmr_n2o = clear_val - Interstitial%vmr_co2 = clear_val - Interstitial%flxprf_lw%upfxc = clear_val - Interstitial%flxprf_lw%dnfxc = clear_val - Interstitial%flxprf_lw%upfx0 = clear_val - Interstitial%flxprf_lw%dnfx0 = clear_val - Interstitial%flxprf_sw%upfxc = clear_val - Interstitial%flxprf_sw%dnfxc = clear_val - Interstitial%flxprf_sw%upfx0 = clear_val - Interstitial%flxprf_sw%dnfx0 = clear_val - end if - ! - end subroutine gfs_interstitial_rad_reset - - subroutine gfs_interstitial_phys_reset (Interstitial, Model) + subroutine gfs_interstitial_reset (Interstitial, Model) ! implicit none ! @@ -1216,6 +1443,11 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%adjvisbmu = clear_val Interstitial%adjvisdfu = clear_val Interstitial%adjvisdfd = clear_val + Interstitial%aerodp = clear_val + Interstitial%alb1d = clear_val + if (.not. 
Model%do_RRTMGP) then + Interstitial%alpha = clear_val + end if Interstitial%bexp1d = clear_val Interstitial%cd = clear_val Interstitial%cd_ice = Model%huge @@ -1228,8 +1460,12 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%chh_ice = Model%huge Interstitial%chh_land = Model%huge Interstitial%chh_water = Model%huge - Interstitial%cld1d = clear_val Interstitial%cldf = clear_val + Interstitial%cldsa = clear_val + Interstitial%cldtaulw = clear_val + Interstitial%cldtausw = clear_val + Interstitial%cld1d = clear_val + Interstitial%clouds = clear_val Interstitial%clw = clear_val Interstitial%clw(:,:,2) = -999.9 Interstitial%clx = clear_val @@ -1237,28 +1473,30 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%cmm_land = Model%huge Interstitial%cmm_water = Model%huge Interstitial%cnvc = clear_val - Interstitial%cnvw = clear_val Interstitial%ctei_r = clear_val Interstitial%ctei_rml = clear_val Interstitial%cumabs = clear_val Interstitial%dd_mf = clear_val + Interstitial%de_lgth = clear_val Interstitial%del = clear_val Interstitial%del_gz = clear_val + Interstitial%delr = clear_val Interstitial%dlength = clear_val Interstitial%dqdt = clear_val Interstitial%dqsfc1 = clear_val Interstitial%drain = clear_val - Interstitial%dt_mf = clear_val Interstitial%dtdt = clear_val Interstitial%dtsfc1 = clear_val + Interstitial%dt_mf = clear_val Interstitial%dtzm = clear_val Interstitial%dudt = clear_val Interstitial%dusfcg = clear_val Interstitial%dusfc1 = clear_val - Interstitial%dvdftra = clear_val Interstitial%dvdt = clear_val Interstitial%dvsfcg = clear_val Interstitial%dvsfc1 = clear_val + Interstitial%dvdftra = clear_val + Interstitial%dzlyr = clear_val Interstitial%elvmax = clear_val Interstitial%ep1d = clear_val Interstitial%ep1d_ice = Model%huge @@ -1273,6 +1511,8 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%ecan = clear_val Interstitial%etran = clear_val Interstitial%edir = clear_val + 
Interstitial%faerlw = clear_val + Interstitial%faersw = clear_val Interstitial%ffhh_ice = Model%huge Interstitial%ffhh_land = Model%huge Interstitial%ffhh_water = Model%huge @@ -1301,6 +1541,7 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%gamma = clear_val Interstitial%gamq = clear_val Interstitial%gamt = clear_val + Interstitial%gasvmr = clear_val Interstitial%gflx = clear_val Interstitial%gflx_ice = clear_val Interstitial%gflx_land = clear_val @@ -1313,25 +1554,42 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%hflx_ice = Model%huge Interstitial%hflx_land = Model%huge Interstitial%hflx_water = Model%huge + Interstitial%htlwc = clear_val + Interstitial%htlw0 = clear_val + Interstitial%htswc = clear_val + Interstitial%htsw0 = clear_val Interstitial%dry = .false. + Interstitial%idxday = 0 Interstitial%icy = .false. Interstitial%lake = .false. + Interstitial%lndp_vgf = clear_val Interstitial%ocean = .false. Interstitial%islmsk = 0 Interstitial%islmsk_cice = 0 Interstitial%wet = .false. 
+ Interstitial%kb = 0 Interstitial%kbot = Model%levs Interstitial%kcnv = 0 + Interstitial%kd = 0 Interstitial%kinver = Model%levs Interstitial%kpbl = 0 + Interstitial%kt = 0 Interstitial%ktop = 1 + Interstitial%mbota = 0 + Interstitial%mtopa = 0 + Interstitial%nday = 0 Interstitial%oa4 = clear_val Interstitial%oc = clear_val - Interstitial%prcpmp = clear_val + Interstitial%olyr = clear_val + Interstitial%plvl = clear_val + Interstitial%plyr = clear_val Interstitial%prnum = clear_val + Interstitial%qlyr = clear_val + Interstitial%prcpmp = clear_val Interstitial%qss_ice = Model%huge Interstitial%qss_land = Model%huge Interstitial%qss_water = Model%huge + Interstitial%raddt = clear_val Interstitial%raincd = clear_val Interstitial%raincs = clear_val Interstitial%rainmcadj = clear_val @@ -1348,6 +1606,13 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%save_u = clear_val Interstitial%save_v = clear_val Interstitial%sbsno = clear_val + Interstitial%scmpsw%uvbfc = clear_val + Interstitial%scmpsw%uvbf0 = clear_val + Interstitial%scmpsw%nirbm = clear_val + Interstitial%scmpsw%nirdf = clear_val + Interstitial%scmpsw%visbm = clear_val + Interstitial%scmpsw%visdf = clear_val + Interstitial%sfcalb = clear_val Interstitial%sigma = clear_val Interstitial%sigmaf = clear_val Interstitial%sigmafrac = clear_val @@ -1361,12 +1626,16 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%stress_water = Model%huge Interstitial%theta = clear_val Interstitial%tkeh = 0 + Interstitial%tlvl = clear_val + Interstitial%tlyr = clear_val Interstitial%tprcp_ice = Model%huge Interstitial%tprcp_land = Model%huge Interstitial%tprcp_water = Model%huge Interstitial%trans = clear_val Interstitial%tseal = clear_val + Interstitial%tsfa = clear_val Interstitial%tsfc_water = Model%huge + Interstitial%tsfg = clear_val Interstitial%tsurf_ice = Model%huge Interstitial%tsurf_land = Model%huge Interstitial%tsurf_water = Model%huge @@ -1376,7 +1645,6 @@ 
subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%uustar_water = Model%huge Interstitial%vdftra = clear_val Interstitial%vegf1d = clear_val - Interstitial%lndp_vgf = clear_val Interstitial%wcbmax = clear_val Interstitial%wind = Model%huge Interstitial%work1 = clear_val @@ -1391,7 +1659,68 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) Interstitial%ztmax_land = clear_val Interstitial%ztmax_water = clear_val -! UGWP common + ! RRTMGP + if (Model%do_RRTMGP) then + Interstitial%tracer = clear_val + Interstitial%tv_lay = clear_val + Interstitial%relhum = clear_val + Interstitial%qs_lay = clear_val + Interstitial%q_lay = clear_val + Interstitial%deltaZ = clear_val + Interstitial%deltaZc = clear_val + Interstitial%deltaP = clear_val + Interstitial%p_lev = clear_val + Interstitial%p_lay = clear_val + Interstitial%t_lev = clear_val + Interstitial%t_lay = clear_val + Interstitial%cloud_overlap_param = clear_val + Interstitial%precip_overlap_param = clear_val + Interstitial%fluxlwUP_allsky = clear_val + Interstitial%fluxlwDOWN_allsky = clear_val + Interstitial%fluxlwUP_clrsky = clear_val + Interstitial%fluxlwDOWN_clrsky = clear_val + Interstitial%fluxswUP_allsky = clear_val + Interstitial%fluxswDOWN_allsky = clear_val + Interstitial%fluxswUP_clrsky = clear_val + Interstitial%fluxswDOWN_clrsky = clear_val + Interstitial%aerosolslw = clear_val + Interstitial%aerosolssw = clear_val + Interstitial%precip_frac = clear_val + Interstitial%cld_cnv_frac = clear_val + Interstitial%cnv_cloud_overlap_param = clear_val + Interstitial%cld_cnv_lwp = clear_val + Interstitial%cld_cnv_reliq = clear_val + Interstitial%cld_cnv_iwp = clear_val + Interstitial%cld_cnv_reice = clear_val + Interstitial%cld_pbl_lwp = clear_val + Interstitial%cld_pbl_reliq = clear_val + Interstitial%cld_pbl_iwp = clear_val + Interstitial%cld_pbl_reice = clear_val + Interstitial%flxprf_lw%upfxc = clear_val + Interstitial%flxprf_lw%dnfxc = clear_val + Interstitial%flxprf_lw%upfx0 
= clear_val + Interstitial%flxprf_lw%dnfx0 = clear_val + Interstitial%flxprf_sw%upfxc = clear_val + Interstitial%flxprf_sw%dnfxc = clear_val + Interstitial%flxprf_sw%upfx0 = clear_val + Interstitial%flxprf_sw%dnfx0 = clear_val + Interstitial%sfc_emiss_byband = clear_val + Interstitial%sec_diff_byband = clear_val + Interstitial%sfc_alb_nir_dir = clear_val + Interstitial%sfc_alb_nir_dif = clear_val + Interstitial%sfc_alb_uvvis_dir = clear_val + Interstitial%sfc_alb_uvvis_dif = clear_val + Interstitial%toa_src_sw = clear_val + Interstitial%toa_src_lw = clear_val + Interstitial%vmr_o2 = clear_val + Interstitial%vmr_h2o = clear_val + Interstitial%vmr_o3 = clear_val + Interstitial%vmr_ch4 = clear_val + Interstitial%vmr_n2o = clear_val + Interstitial%vmr_co2 = clear_val + end if + + ! UGWP common Interstitial%tau_mtb = clear_val Interstitial%tau_ogw = clear_val Interstitial%tau_tofd = clear_val @@ -1407,65 +1736,64 @@ subroutine gfs_interstitial_phys_reset (Interstitial, Model) ! CIRES UGWP v1 if (Model%ldiag_ugwp .or. Model%do_ugwp_v0 .or. Model%do_ugwp_v0_nst_only & .or. Model%do_ugwp_v1) then - Interstitial%dudt_ngw = clear_val - Interstitial%dvdt_ngw = clear_val - Interstitial%dtdt_ngw = clear_val - Interstitial%kdis_ngw = clear_val + Interstitial%dudt_ngw = clear_val + Interstitial%dvdt_ngw = clear_val + Interstitial%dtdt_ngw = clear_val + Interstitial%kdis_ngw = clear_val end if !-- GSL drag suite if (Model%gwd_opt==3 .or. Model%gwd_opt==33 .or. & - Model%gwd_opt==2 .or. Model%gwd_opt==22) then - Interstitial%varss = clear_val - Interstitial%ocss = clear_val - Interstitial%oa4ss = clear_val - Interstitial%clxss = clear_val + Model%gwd_opt==2 .or. Model%gwd_opt==22 ) then + Interstitial%varss = clear_val + Interstitial%ocss = clear_val + Interstitial%oa4ss = clear_val + Interstitial%clxss = clear_val end if ! - ! Reset fields that are conditional on physics choices - if (Model%imp_physics == Model%imp_physics_gfdl .or. 
Model%imp_physics == Model%imp_physics_thompson & - .or. Model%imp_physics == Model%imp_physics_tempo .or. Model%imp_physics == Model%imp_physics_nssl & - ) then - Interstitial%graupelmp = clear_val - Interstitial%icemp = clear_val - Interstitial%rainmp = clear_val - Interstitial%snowmp = clear_val + ! Allocate arrays that are conditional on physics choices + if (Model%imp_physics == Model%imp_physics_gfdl .or. Model%imp_physics == Model%imp_physics_thompson & + .or. Model%imp_physics == Model%imp_physics_tempo .or. Model%imp_physics == Model%imp_physics_nssl & + ) then + Interstitial%graupelmp = clear_val + Interstitial%icemp = clear_val + Interstitial%rainmp = clear_val + Interstitial%snowmp = clear_val else if (Model%imp_physics == Model%imp_physics_mg) then - Interstitial%ncgl = clear_val - Interstitial%ncpr = clear_val - Interstitial%ncps = clear_val - Interstitial%qgl = clear_val - Interstitial%qrn = clear_val - Interstitial%qsnw = clear_val - Interstitial%qlcn = clear_val - Interstitial%qicn = clear_val - Interstitial%w_upi = clear_val - Interstitial%cf_upi = clear_val - Interstitial%cnv_mfd = clear_val - Interstitial%cnv_dqldt = clear_val - Interstitial%clcn = clear_val - Interstitial%cnv_fice = clear_val - Interstitial%cnv_ndrop = clear_val - Interstitial%cnv_nice = clear_val + Interstitial%ncgl = clear_val + Interstitial%ncpr = clear_val + Interstitial%ncps = clear_val + Interstitial%qgl = clear_val + Interstitial%qrn = clear_val + Interstitial%qsnw = clear_val + Interstitial%qlcn = clear_val + Interstitial%qicn = clear_val + Interstitial%w_upi = clear_val + Interstitial%cf_upi = clear_val + Interstitial%cnv_mfd = clear_val + Interstitial%cnv_dqldt = clear_val + Interstitial%clcn = clear_val + Interstitial%cnv_fice = clear_val + Interstitial%cnv_ndrop = clear_val + Interstitial%cnv_nice = clear_val end if if (Model%lsm == Model%lsm_noahmp) then - Interstitial%t2mmp = clear_val - Interstitial%q2mp = clear_val + Interstitial%t2mmp = clear_val + 
Interstitial%q2mp = clear_val end if - ! + ! Set flag for resetting maximum hourly output fields Interstitial%max_hourly_reset = mod(Model%kdt-1, nint(Model%avg_max_length/Model%dtp)) == 0 ! Use same logic in UFS to reset Thompson extended diagnostics Interstitial%ext_diag_thompson_reset = Interstitial%max_hourly_reset - ! + ! Frequency flag for computing the full radar reflectivity (water coated ice) if (Model%nsfullradar_diag<0) then Interstitial%fullradar_diag = .true. else Interstitial%fullradar_diag = (Model%kdt == 1 .or. mod(Model%kdt, nint(Model%nsfullradar_diag/Model%dtp)) == 0) end if - ! - end subroutine gfs_interstitial_phys_reset + end subroutine gfs_interstitial_reset !----------------------- ! GFDL_interstitial_type diff --git a/ccpp/data/CCPP_typedefs.meta b/ccpp/data/CCPP_typedefs.meta index 6e6f8e90c1..809487b1de 100644 --- a/ccpp/data/CCPP_typedefs.meta +++ b/ccpp/data/CCPP_typedefs.meta @@ -10,98 +10,98 @@ standard_name = surface_upwelling_longwave_flux_over_water long_name = surface upwelling longwave flux at current time over water units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjsfculw_land] standard_name = surface_upwelling_longwave_flux_over_land long_name = surface upwelling longwave flux at current time over land units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjsfculw_ice] standard_name = surface_upwelling_longwave_flux_over_ice long_name = surface upwelling longwave flux at current time over ice units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjnirbmd] standard_name = surface_downwelling_direct_near_infrared_shortwave_flux long_name = surface downwelling beam near-infrared shortwave flux at current time units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = 
kind_phys [adjnirbmu] standard_name = surface_upwelling_direct_near_infrared_shortwave_flux long_name = surface upwelling beam near-infrared shortwave flux at current time units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjnirdfd] standard_name = surface_downwelling_diffuse_near_infrared_shortwave_flux long_name = surface downwelling diffuse near-infrared shortwave flux at current time units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjnirdfu] standard_name = surface_upwelling_diffuse_near_infrared_shortwave_flux long_name = surface upwelling diffuse near-infrared shortwave flux at current time units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjvisbmd] standard_name = surface_downwelling_direct_ultraviolet_and_visible_shortwave_flux long_name = surface downwelling beam ultraviolet plus visible shortwave flux at current time units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjvisbmu] standard_name = surface_upwelling_direct_ultraviolet_and_visible_shortwave_flux long_name = surface upwelling beam ultraviolet plus visible shortwave flux at current time units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjvisdfu] standard_name = surface_upwelling_diffuse_ultraviolet_and_visible_shortwave_flux long_name = surface upwelling diffuse ultraviolet plus visible shortwave flux at current time units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [adjvisdfd] standard_name = surface_downwelling_diffuse_ultraviolet_and_visible_shortwave_flux long_name = surface downwelling diffuse ultraviolet plus visible shortwave flux at current time units = W m-2 - 
dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [aerodp] standard_name = atmosphere_optical_thickness_due_to_ambient_aerosol_particles long_name = vertical integrated optical depth for various aerosol species units = none - dimensions = (horizontal_loop_extent,number_of_species_for_aerosol_optical_depth) + dimensions = (horizontal_dimension,number_of_species_for_aerosol_optical_depth) type = real kind = kind_phys [alb1d] standard_name = surface_albedo_perturbation long_name = surface albedo perturbation units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [alpha] standard_name = cloud_overlap_decorrelation_parameter long_name = cloud overlap decorrelation parameter for RRTMG (but not for RRTMGP) units = frac - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys active = (.not. 
flag_for_rrtmgp_radiation_scheme) @@ -109,91 +109,91 @@ standard_name = perturbation_of_soil_type_b_parameter long_name = perturbation of soil type "b" parameter units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cd] standard_name = surface_drag_coefficient_for_momentum_in_air long_name = surface exchange coeff for momentum units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cd_water] standard_name = surface_drag_coefficient_for_momentum_in_air_over_water long_name = surface exchange coeff for momentum over water units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cd_land] standard_name = surface_drag_coefficient_for_momentum_in_air_over_land long_name = surface exchange coeff for momentum over land units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cd_ice] standard_name = surface_drag_coefficient_for_momentum_in_air_over_ice long_name = surface exchange coeff for momentum over ice units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cdq] standard_name = surface_drag_coefficient_for_heat_and_moisture_in_air long_name = surface exchange coeff heat & moisture units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cdq_water] standard_name = surface_drag_coefficient_for_heat_and_moisture_in_air_over_water long_name = surface exchange coeff heat surface exchange coeff heat & moisture over ocean moisture over water units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cdq_land] standard_name = surface_drag_coefficient_for_heat_and_moisture_in_air_over_land long_name = surface exchange coeff heat & 
moisture over land units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cdq_ice] standard_name = surface_drag_coefficient_for_heat_and_moisture_in_air_over_ice long_name = surface exchange coeff heat & moisture over ice units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [chh_water] standard_name = surface_drag_mass_flux_for_heat_and_moisture_in_air_over_water long_name = thermal exchange coefficient over water units = kg m-2 s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [chh_land] standard_name = surface_drag_mass_flux_for_heat_and_moisture_in_air_over_land long_name = thermal exchange coefficient over land units = kg m-2 s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [chh_ice] standard_name = surface_drag_mass_flux_for_heat_and_moisture_in_air_over_ice long_name = thermal exchange coefficient over ice units = kg m-2 s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cf_upi] standard_name = convective_cloud_fraction_for_microphysics long_name = convective cloud fraction for microphysics units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -201,7 +201,7 @@ standard_name = convective_cloud_volume_fraction long_name = convective cloud volume fraction units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -209,140 
+209,140 @@ standard_name = cloud_area_fraction long_name = fraction of grid box area in which updrafts occur units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cldsa] standard_name = cloud_area_fraction_for_radiation long_name = fraction of clouds for low, middle, high, total and BL units = frac - dimensions = (horizontal_loop_extent,5) + dimensions = (horizontal_dimension,5) type = real kind = kind_phys [cldtaulw] standard_name = cloud_optical_depth_layers_at_10mu_band long_name = approx 10mu band layer cloud optical depth units = none - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [cldtausw] standard_name = cloud_optical_depth_layers_at_0p55mu_band long_name = approx .55mu band layer cloud optical depth units = none - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [cld1d] standard_name = cloud_work_function long_name = cloud work function units = m2 s-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [clouds(:,:,1)] standard_name = total_cloud_fraction long_name = layer total cloud fraction units = frac - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clouds(:,:,2)] standard_name = cloud_liquid_water_path long_name = layer cloud liquid water path units = g m-2 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clouds(:,:,3)] 
standard_name = mean_effective_radius_for_liquid_cloud long_name = mean effective radius for liquid cloud units = um - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clouds(:,:,4)] standard_name = cloud_ice_water_path long_name = layer cloud ice water path units = g m-2 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clouds(:,:,5)] standard_name = mean_effective_radius_for_ice_cloud long_name = mean effective radius for ice cloud units = um - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clouds(:,:,6)] standard_name = cloud_rain_water_path long_name = cloud rain water path units = g m-2 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clouds(:,:,7)] standard_name = mean_effective_radius_for_rain_drop long_name = mean effective radius for rain drop units = um - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clouds(:,:,8)] standard_name = cloud_snow_water_path long_name = cloud snow water path units = g m-2 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clouds(:,:,9)] standard_name = mean_effective_radius_for_snow_flake long_name = mean effective 
radius for snow flake units = um - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [clw] standard_name = convective_transportable_tracers long_name = array to contain cloud water and other convective trans. tracers units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension,number_of_tracers_for_convective_transport) + dimensions = (horizontal_dimension,vertical_layer_dimension,number_of_tracers_for_convective_transport) type = real kind = kind_phys [clw(:,:,1)] standard_name = ice_water_mixing_ratio_convective_transport_tracer long_name = ratio of mass of ice water to mass of dry air plus vapor (without condensates) in the convectively transported tracer array units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [clw(:,:,2)] standard_name = cloud_condensed_water_mixing_ratio_convective_transport_tracer long_name = ratio of mass of cloud water to mass of dry air plus vapor (without condensates) in the convectively transported tracer array units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [clw(:,:,index_for_turbulent_kinetic_energy_convective_transport_tracer)] standard_name = turbulent_kinetic_energy_convective_transport_tracer long_name = turbulent kinetic energy in the convectively transported tracer array units = m2 s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [clx] standard_name = fraction_of_grid_box_with_subgrid_orography_higher_than_critical_height long_name = frac. 
of grid box with by subgrid height_above_mean_sea_level higher than critical height units = frac - dimensions = (horizontal_loop_extent,4) + dimensions = (horizontal_dimension,4) type = real kind = kind_phys [clxss] standard_name = fraction_of_grid_box_with_subgrid_orography_higher_than_critical_height_small_scale long_name = frac. of grid box with by subgrid height_above_mean_sea_level higher than critical height small scale units = frac - dimensions = (horizontal_loop_extent,4) + dimensions = (horizontal_dimension,4) type = real kind = kind_phys active = (control_for_drag_suite_gravity_wave_drag == 2 .or. control_for_drag_suite_gravity_wave_drag == 3 .or. control_for_drag_suite_gravity_wave_drag == 22 .or. control_for_drag_suite_gravity_wave_drag == 33) @@ -350,28 +350,28 @@ standard_name = surface_drag_wind_speed_for_momentum_in_air_over_water long_name = momentum exchange coefficient over water units = m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cmm_land] standard_name = surface_drag_wind_speed_for_momentum_in_air_over_land long_name = momentum exchange coefficient over land units = m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cmm_ice] standard_name = surface_drag_wind_speed_for_momentum_in_air_over_ice long_name = momentum exchange coefficient over ice units = m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cnv_dqldt] standard_name = tendency_of_cloud_water_due_to_convective_microphysics long_name = tendency of cloud water due to convective microphysics units = kg m-2 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -379,7 +379,7 @@ standard_name = 
ice_fraction_in_convective_tower long_name = ice fraction in convective tower units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -387,7 +387,7 @@ standard_name = detrained_mass_flux long_name = detrained mass flux units = kg m-2 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -395,7 +395,7 @@ standard_name = number_concentration_of_cloud_liquid_water_particles_for_detrainment long_name = droplet number concentration in convective detrainment units = m-3 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -403,7 +403,7 @@ standard_name = number_concentration_of_ice_crystals_for_detrainment long_name = crystal number concentration in convective detrainment units = m-3 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -411,119 +411,112 @@ standard_name = convective_cloud_cover long_name = convective cloud cover units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) - type = real - kind = kind_phys -[cnvw] - standard_name = convective_cloud_water_mixing_ratio - long_name = moist convective cloud water mixing ratio - units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + 
dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [ctei_r] standard_name = cloud_top_entrainment_instability_value long_name = cloud top entrainment instability value units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ctei_rml] standard_name = grid_sensitive_critical_cloud_top_entrainment_instability_criteria long_name = grid sensitive critical cloud top entrainment instability criteria units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [cumabs] standard_name = maximum_column_heating_rate long_name = maximum heating rate in column units = K s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dd_mf] standard_name = instantaneous_atmosphere_downdraft_convective_mass_flux long_name = (downdraft mass flux) * delt units = kg m-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [de_lgth] standard_name = cloud_decorrelation_length long_name = cloud decorrelation length units = km - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [del] standard_name = air_pressure_difference_between_midlayers long_name = air pressure difference between midlayers units = Pa - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [del_gz] standard_name = geopotential_difference_between_midlayers_divided_by_midlayer_virtual_temperature long_name = difference between mid-layer geopotentials divided by mid-layer virtual temperature units = m2 s-2 K-1 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real 
kind = kind_phys [delr] standard_name = layer_pressure_thickness_for_radiation long_name = layer pressure thickness on radiation levels units = hPa - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [dlength] standard_name = characteristic_grid_length_scale long_name = representative horizontal length scale of grid box units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dqdt] standard_name = process_split_cumulative_tendency_of_tracers long_name = updated tendency of the tracers due to model physics units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension,number_of_tracers) + dimensions = (horizontal_dimension,vertical_layer_dimension,number_of_tracers) type = real kind = kind_phys [dqdt(:,:,index_of_specific_humidity_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_specific_humidity long_name = water vapor specific humidity tendency due to model physics units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dqdt(:,:,index_of_cloud_liquid_water_mixing_ratio_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_cloud_liquid_water_mixing_ratio long_name = cloud condensed water mixing ratio tendency due to model physics units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dqdt(:,:,index_of_cloud_ice_mixing_ratio_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_cloud_ice_mixing_ratio long_name = cloud condensed water mixing ratio tendency due to model physics units = kg kg-1 s-1 - 
dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dqdt(:,:,index_of_ozone_mixing_ratio_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_ozone_mixing_ratio long_name = ozone mixing ratio tendency due to model physics units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dqdt(:,:,index_of_mass_number_concentration_of_cloud_droplets_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_mass_number_concentration_of_cloud_liquid_water_particles_in_air long_name = number concentration of cloud droplets (liquid) tendency due to model physics units = kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (index_of_mass_number_concentration_of_cloud_droplets_in_tracer_concentration_array > 0) @@ -531,14 +524,14 @@ standard_name = process_split_cumulative_tendency_of_mass_number_concentration_of_cloud_ice_water_crystals_in_air long_name = number concentration of ice tendency due to model physics units = kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dqdt(:,:,index_of_mass_number_concentration_of_hygroscopic_aerosols_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_mass_number_concentration_of_hygroscopic_aerosols long_name = number concentration of water-friendly aerosols tendency due to model physics units = kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = 
(index_of_mass_number_concentration_of_hygroscopic_aerosols_in_tracer_concentration_array > 0) @@ -546,7 +539,7 @@ standard_name = process_split_cumulative_tendency_of_mass_number_concentration_of_nonhygroscopic_ice_nucleating_aerosols long_name = number concentration of ice-friendly aerosols tendency due to model physics units = kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (index_of_mass_number_concentration_of_nonhygroscopic_ice_nucleating_aerosols_in_tracer_concentration_array > 0) @@ -554,7 +547,7 @@ standard_name = tendency_of_cloud_condensation_nuclei_number_concentration_due_to_model_physics long_name = number concentration of cloud condensation nuclei tendency due to model physics units = kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = ( index_of_cloud_condensation_nuclei_number_concentration_in_tracer_concentration_array > 0 ) @@ -562,388 +555,388 @@ standard_name = process_split_cumulative_tendency_of_rain_mixing_ratio long_name = ratio of mass of rain water tendency to mass of dry air plus vapor (without condensates) due to model physics units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dqdt(:,:,index_of_snow_mixing_ratio_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_snow_mixing_ratio long_name = ratio of mass of snow water tendency to mass of dry air plus vapor (without condensates) due to model physics units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys 
[dqdt(:,:,index_of_graupel_mixing_ratio_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_graupel_mixing_ratio long_name = ratio of mass of graupel tendency to mass of dry air plus vapor (without condensates) due to model physics units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dqdt(:,:,index_of_turbulent_kinetic_energy_in_tracer_concentration_array)] standard_name = process_split_cumulative_tendency_of_turbulent_kinetic_energy long_name = turbulent kinetic energy tendency due to model physics units = J s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dqsfc1] standard_name = instantaneous_surface_upward_latent_heat_flux long_name = surface upward latent heat flux units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [drain] standard_name = subsurface_runoff_flux long_name = subsurface runoff flux units = kg m-2 s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dtdt] standard_name = process_split_cumulative_tendency_of_air_temperature long_name = air temperature tendency due to model physics units = K s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dtsfc1] standard_name = instantaneous_surface_upward_sensible_heat_flux long_name = surface upward sensible heat flux units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dtzm] standard_name = mean_change_over_depth_in_sea_water_temperature long_name = mean of dT(z) (zsea1 to zsea2) units = K - dimensions = (horizontal_loop_extent) + dimensions 
= (horizontal_dimension) type = real kind = kind_phys [dt_mf] standard_name = instantaneous_atmosphere_detrainment_convective_mass_flux long_name = (detrainment mass flux) * delt units = kg m-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dudt] standard_name = process_split_cumulative_tendency_of_x_wind long_name = zonal wind tendency due to model physics units = m s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dusfcg] standard_name = instantaneous_x_stress_due_to_gravity_wave_drag long_name = zonal surface stress due to orographic gravity wave drag units = Pa - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dusfc1] standard_name = instantaneous_surface_x_momentum_flux long_name = x momentum flux units = Pa - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dvdftra] standard_name = tendency_of_vertically_diffused_tracer_concentration long_name = updated tendency of the tracers due to vertical diffusion in PBL scheme units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension,number_of_vertical_diffusion_tracers) + dimensions = (horizontal_dimension,vertical_layer_dimension,number_of_vertical_diffusion_tracers) type = real kind = kind_phys [dvdt] standard_name = process_split_cumulative_tendency_of_y_wind long_name = meridional wind tendency due to model physics units = m s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dvsfcg] standard_name = instantaneous_y_stress_due_to_gravity_wave_drag long_name = meridional surface stress due to orographic gravity wave drag units = Pa - dimensions = 
(horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dvsfc1] standard_name = instantaneous_surface_y_momentum_flux long_name = y momentum flux units = Pa - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dzlyr] standard_name = layer_thickness_for_radiation long_name = layer thickness on radiation levels units = km - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [elvmax] standard_name = maximum_subgrid_orography long_name = maximum of subgrid height_above_mean_sea_level units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ep1d] standard_name = surface_upward_potential_latent_heat_flux long_name = surface upward potential latent heat flux units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ep1d_water] standard_name = surface_upward_potential_latent_heat_flux_over_water long_name = surface upward potential latent heat flux over water units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ep1d_land] standard_name = surface_upward_potential_latent_heat_flux_over_land long_name = surface upward potential latent heat flux over land units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ep1d_ice] standard_name = surface_upward_potential_latent_heat_flux_over_ice long_name = surface upward potential latent heat flux over ice units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [evap_water] standard_name = kinematic_surface_upward_latent_heat_flux_over_water long_name = kinematic surface upward 
latent heat flux over water units = kg kg-1 m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [evap_land] standard_name = kinematic_surface_upward_latent_heat_flux_over_land long_name = kinematic surface upward latent heat flux over land units = kg kg-1 m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [evap_ice] standard_name = kinematic_surface_upward_latent_heat_flux_over_ice long_name = kinematic surface upward latent heat flux over ice units = kg kg-1 m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [pah] standard_name = total_precipitation_advected_heat long_name = precipitation advected heat - total units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ecan] standard_name = evaporation_of_intercepted_water long_name = evaporation of intercepted water units = kg m-2 s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [etran] standard_name = transpiration_rate long_name = transpiration rate units = kg m-2 s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [edir] standard_name = soil_surface_evaporation_rate long_name = soil surface evaporation rate units = kg m-2 s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [faerlw] standard_name = aerosol_optical_properties_for_longwave_bands_01_16 long_name = aerosol optical properties for longwave bands 01-16 units = mixed - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation,number_of_aerosol_output_fields_for_longwave_radiation) + dimensions = 
(horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation,number_of_aerosol_output_fields_for_longwave_radiation) type = real kind = kind_phys [faerlw(:,:,:,1)] standard_name = aerosol_optical_depth_for_longwave_bands_01_16 long_name = aerosol optical depth for longwave bands 01-16 units = none - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation) type = real kind = kind_phys [faerlw(:,:,:,2)] standard_name = aerosol_single_scattering_albedo_for_longwave_bands_01_16 long_name = aerosol single scattering albedo for longwave bands 01-16 units = frac - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation) type = real kind = kind_phys [faerlw(:,:,:,3)] standard_name = aerosol_asymmetry_parameter_for_longwave_bands_01_16 long_name = aerosol asymmetry parameter for longwave bands 01-16 units = none - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation) type = real kind = kind_phys [faersw] standard_name = aerosol_optical_properties_for_shortwave_bands_01_16 long_name = aerosol optical properties for shortwave bands 01-16 units = mixed - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation,number_of_aerosol_output_fields_for_shortwave_radiation) + dimensions = 
(horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation,number_of_aerosol_output_fields_for_shortwave_radiation) type = real kind = kind_phys [faersw(:,:,:,1)] standard_name = aerosol_optical_depth_for_shortwave_bands_01_16 long_name = aerosol optical depth for shortwave bands 01-16 units = none - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation) type = real kind = kind_phys [faersw(:,:,:,2)] standard_name = aerosol_single_scattering_albedo_for_shortwave_bands_01_16 long_name = aerosol single scattering albedo for shortwave bands 01-16 units = frac - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation) type = real kind = kind_phys [faersw(:,:,:,3)] standard_name = aerosol_asymmetry_parameter_for_shortwave_bands_01_16 long_name = aerosol asymmetry parameter for shortwave bands 01-16 units = none - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation) type = real kind = kind_phys [ffhh_water] standard_name = Monin_Obukhov_similarity_function_for_heat_over_water long_name = Monin-Obukhov similarity function for heat over water units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ffhh_land] standard_name = Monin_Obukhov_similarity_function_for_heat_over_land long_name = Monin-Obukhov similarity function for 
heat over land units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ffhh_ice] standard_name = Monin_Obukhov_similarity_function_for_heat_over_ice long_name = Monin-Obukhov similarity function for heat over ice units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fh2] standard_name = Monin_Obukhov_similarity_function_for_heat_at_2m long_name = Monin-Obukhov similarity parameter for heat at 2m units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fh2_water] standard_name = Monin_Obukhov_similarity_function_for_heat_at_2m_over_water long_name = Monin-Obukhov similarity parameter for heat at 2m over water units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fh2_land] standard_name = Monin_Obukhov_similarity_function_for_heat_at_2m_over_land long_name = Monin-Obukhov similarity parameter for heat at 2m over land units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fh2_ice] standard_name = Monin_Obukhov_similarity_function_for_heat_at_2m_over_ice long_name = Monin-Obukhov similarity parameter for heat at 2m over ice units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [flag_cice] standard_name = flag_for_cice long_name = flag for cice units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = logical [flag_guess] standard_name = flag_for_guess_run long_name = flag for guess run units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = logical [flag_iter] standard_name = flag_for_iteration long_name = flag for iteration units = flag - dimensions = (horizontal_loop_extent) + dimensions = 
(horizontal_dimension) type = logical [flag_lakefreeze] standard_name = flag_for_lake_water_freeze long_name = flag for lake water freeze units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = logical [ffmm_water] standard_name = Monin_Obukhov_similarity_function_for_momentum_over_water long_name = Monin-Obukhov similarity function for momentum over water units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ffmm_land] standard_name = Monin_Obukhov_similarity_function_for_momentum_over_land long_name = Monin-Obukhov similarity function for momentum over land units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ffmm_ice] standard_name = Monin_Obukhov_similarity_function_for_momentum_over_ice long_name = Monin-Obukhov similarity function for momentum over ice units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fm10] standard_name = Monin_Obukhov_similarity_function_for_momentum_at_10m long_name = Monin-Obukhov similarity parameter for momentum at 10m units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fm10_water] standard_name = Monin_Obukhov_similarity_function_for_momentum_at_10m_over_water long_name = Monin-Obukhov similarity parameter for momentum at 10m over water units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fm10_land] standard_name = Monin_Obukhov_similarity_function_for_momentum_at_10m_over_land long_name = Monin-Obukhov similarity parameter for momentum at 10m over land units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fm10_ice] standard_name = 
Monin_Obukhov_similarity_function_for_momentum_at_10m_over_ice long_name = Monin-Obukhov similarity parameter for momentum at 10m over ice units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [frain] @@ -957,7 +950,7 @@ standard_name = land_area_fraction_for_microphysics long_name = land area fraction used in microphysics schemes units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fscav] @@ -978,154 +971,154 @@ standard_name = surface_downwelling_longwave_flux_absorbed_by_ground long_name = total sky surface downward longwave flux absorbed by the ground units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gabsbdlw_water] standard_name = surface_downwelling_longwave_flux_absorbed_by_ground_over_water long_name = total sky surface downward longwave flux absorbed by the ground over water units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gabsbdlw_land] standard_name = surface_downwelling_longwave_flux_absorbed_by_ground_over_land long_name = total sky surface downward longwave flux absorbed by the ground over land units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gabsbdlw_ice] standard_name = surface_downwelling_longwave_flux_absorbed_by_ground_over_ice long_name = total sky surface downward longwave flux absorbed by the ground over ice units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gamma] standard_name = anisotropy_of_subgrid_orography long_name = anisotropy of subgrid height_above_mean_sea_level units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gamq] standard_name = 
countergradient_mixing_term_for_water_vapor long_name = countergradient mixing term for water vapor units = kg kg-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gamt] standard_name = countergradient_mixing_term_for_temperature long_name = countergradient mixing term for temperature units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gasvmr(:,:,1)] standard_name = volume_mixing_ratio_of_co2 long_name = volume mixing ratio co2 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gasvmr(:,:,2)] standard_name = volume_mixing_ratio_of_n2o long_name = volume mixing ratio no2 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gasvmr(:,:,3)] standard_name = volume_mixing_ratio_of_ch4 long_name = volume mixing ratio ch4 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gasvmr(:,:,4)] standard_name = volume_mixing_ratio_of_o2 long_name = volume mixing ratio o2 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gasvmr(:,:,5)] standard_name = volume_mixing_ratio_of_co long_name = volume mixing ratio co units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type 
= real kind = kind_phys [gasvmr(:,:,6)] standard_name = volume_mixing_ratio_of_cfc11 long_name = volume mixing ratio cfc11 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gasvmr(:,:,7)] standard_name = volume_mixing_ratio_of_cfc12 long_name = volume mixing ratio cfc12 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gasvmr(:,:,8)] standard_name = volume_mixing_ratio_of_cfc22 long_name = volume mixing ratio cfc22 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gasvmr(:,:,9)] standard_name = volume_mixing_ratio_of_ccl4 long_name = volume mixing ratio ccl4 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gasvmr(:,:,10)] standard_name = volume_mixing_ratio_of_cfc113 long_name = volume mixing ratio cfc113 units = m3 m-3 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [gflx] standard_name = upward_heat_flux_in_soil long_name = soil heat flux units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gflx_water] standard_name = upward_heat_flux_in_soil_over_water long_name = soil heat flux over water units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = 
(horizontal_dimension) type = real kind = kind_phys [gflx_land] standard_name = upward_heat_flux_in_soil_over_land long_name = soil heat flux over land units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [gflx_ice] standard_name = upward_heat_flux_in_soil_over_ice long_name = soil heat flux over ice units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [graupelmp] standard_name = lwe_thickness_of_graupel_amount long_name = explicit graupel fall on physics timestep units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_gfdl_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_thompson_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_nssl_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_tempo_microphysics_scheme) @@ -1133,91 +1126,91 @@ standard_name = tendency_of_x_wind_due_to_convective_gravity_wave_drag long_name = zonal wind tendency due to convective gravity wave drag units = m s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [gwdcv] standard_name = tendency_of_y_wind_due_to_convective_gravity_wave_drag long_name = meridional wind tendency due to convective gravity wave drag units = m s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [zvfun] standard_name = function_of_surface_roughness_length_and_green_vegetation_fraction long_name = function of surface roughness length and green vegetation fraction units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys 
[hffac] standard_name = surface_upward_sensible_heat_flux_reduction_factor long_name = surface upward sensible heat flux reduction factor from canopy heat storage units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [hflxq] standard_name = kinematic_surface_upward_sensible_heat_flux_reduced_by_surface_roughness_and_vegetation long_name = kinematic surface upward sensible heat flux reduced by surface roughness and vegetation units = K m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [hflx_water] standard_name = kinematic_surface_upward_sensible_heat_flux_over_water long_name = kinematic surface upward sensible heat flux over water units = K m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [hflx_land] standard_name = kinematic_surface_upward_sensible_heat_flux_over_land long_name = kinematic surface upward sensible heat flux over land units = K m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [hflx_ice] standard_name = kinematic_surface_upward_sensible_heat_flux_over_ice long_name = kinematic surface upward sensible heat flux over ice units = K m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [htlwc] standard_name = tendency_of_air_temperature_due_to_longwave_heating_on_radiation_time_step_and_radiation_levels long_name = total sky heating rate due to longwave radiation units = K s-1 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [htlw0] standard_name = tendency_of_air_temperature_due_to_longwave_heating_assuming_clear_sky_on_radiation_time_step_and_radiation_levels long_name = clear sky heating 
rate due to longwave radiation units = K s-1 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [htswc] standard_name = tendency_of_air_temperature_due_to_shortwave_heating_on_radiation_time_step_and_radiation_levels long_name = total sky heating rate due to shortwave radiation units = K s-1 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [htsw0] standard_name = tendency_of_air_temperature_due_to_shortwave_heating_assuming_clear_sky_on_radiation_time_step_and_radiation_levels long_name = clear sky heating rates due to shortwave radiation units = K s-1 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [icemp] standard_name = lwe_thickness_of_ice_amount long_name = explicit ice fall on physics timestep units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_gfdl_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_thompson_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_nssl_microphysics_scheme .or. 
control_for_microphysics_scheme == identifier_for_tempo_microphysics_scheme) @@ -1225,31 +1218,31 @@ standard_name = flag_nonzero_land_surface_fraction long_name = flag indicating presence of some land surface area fraction units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = logical [idxday] standard_name = daytime_points long_name = daytime points units = index - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = integer [icy] standard_name = flag_nonzero_sea_ice_surface_fraction long_name = flag indicating presence of some sea ice surface area fraction units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = logical [lake] standard_name = flag_nonzero_lake_surface_fraction long_name = flag indicating presence of some lake surface area fraction units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = logical [ocean] standard_name = flag_nonzero_ocean_surface_fraction long_name = flag indicating presence of some ocean surface area fraction units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = logical [ipr] standard_name = horizontal_index_of_printed_column @@ -1261,13 +1254,13 @@ standard_name = sea_land_ice_mask long_name = sea/land/ice mask (=0/1/2) units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = integer [islmsk_cice] standard_name = sea_land_ice_mask_cice long_name = sea/land/ice mask cice (=0/1/2) units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = integer [itc] standard_name = index_of_first_chemical_tracer_for_convection @@ -1279,7 +1272,7 @@ standard_name = flag_nonzero_wet_surface_fraction long_name = flag indicating presence of some ocean or lake surface area fraction units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type 
= logical [kb] standard_name = vertical_index_difference_between_layer_and_lower_bound @@ -1291,13 +1284,13 @@ standard_name = vertical_index_at_cloud_base long_name = vertical index at cloud base units = index - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = integer [kcnv] standard_name = flag_deep_convection long_name = flag indicating whether convection occurs in column (0 or 1) units = flag - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = integer [kd] standard_name = vertical_index_difference_between_inout_and_local @@ -1309,13 +1302,13 @@ standard_name = index_of_highest_temperature_inversion long_name = index of highest temperature inversion units = index - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = integer [kpbl] standard_name = vertical_index_at_top_of_atmosphere_boundary_layer long_name = vertical index at top atmospheric boundary layer units = index - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = integer [kt] standard_name = vertical_index_difference_between_layer_and_upper_bound @@ -1327,7 +1320,7 @@ standard_name = vertical_index_at_cloud_top long_name = vertical index at cloud top units = index - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = integer [latidxprnt] standard_name = latitude_index_in_debug_printouts @@ -1357,7 +1350,7 @@ standard_name = model_layer_number_at_cloud_base long_name = vertical indices for low, middle and high cloud bases units = index - dimensions = (horizontal_loop_extent,3) + dimensions = (horizontal_dimension,3) type = integer [mg3_as_mg2] standard_name = flag_mg3_as_mg2 @@ -1369,7 +1362,7 @@ standard_name = model_layer_number_at_cloud_top long_name = vertical indices for low, middle and high cloud tops units = index - dimensions = (horizontal_loop_extent,3) + dimensions = (horizontal_dimension,3) type = integer [nbdlw] standard_name = 
number_of_aerosol_bands_for_longwave_radiation @@ -1387,7 +1380,7 @@ standard_name = local_graupel_number_concentration long_name = number concentration of graupel local to physics units = kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -1395,7 +1388,7 @@ standard_name = local_rain_number_concentration long_name = number concentration of rain local to physics units = kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -1403,7 +1396,7 @@ standard_name = local_snow_number_concentration long_name = number concentration of snow local to physics units = kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -1501,14 +1494,14 @@ standard_name = asymmetry_of_subgrid_orography long_name = asymmetry of subgrid height_above_mean_sea_level units = none - dimensions = (horizontal_loop_extent,4) + dimensions = (horizontal_dimension,4) type = real kind = kind_phys [varss] standard_name = standard_deviation_of_subgrid_orography_small_scale long_name = standard deviation of subgrid height_above_mean_sea_level small scale units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys active = (control_for_drag_suite_gravity_wave_drag == 2 .or. control_for_drag_suite_gravity_wave_drag == 3 .or. control_for_drag_suite_gravity_wave_drag == 22 .or. 
control_for_drag_suite_gravity_wave_drag == 33) @@ -1516,7 +1509,7 @@ standard_name = asymmetry_of_subgrid_orography_small_scale long_name = asymmetry of subgrid height_above_mean_sea_level small scale units = none - dimensions = (horizontal_loop_extent,4) + dimensions = (horizontal_dimension,4) type = real kind = kind_phys active = (control_for_drag_suite_gravity_wave_drag == 2 .or. control_for_drag_suite_gravity_wave_drag == 3 .or. control_for_drag_suite_gravity_wave_drag == 22 .or. control_for_drag_suite_gravity_wave_drag == 33) @@ -1524,14 +1517,14 @@ standard_name = convexity_of_subgrid_orography long_name = convexity of subgrid height_above_mean_sea_level units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ocss] standard_name = convexity_of_subgrid_orography_small_scale long_name = convexity of subgrid height_above_mean_sea_level small scale units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys active = (control_for_drag_suite_gravity_wave_drag == 2 .or. control_for_drag_suite_gravity_wave_drag == 3 .or. control_for_drag_suite_gravity_wave_drag == 22 .or. 
control_for_drag_suite_gravity_wave_drag == 33) @@ -1539,7 +1532,7 @@ standard_name = ozone_concentration_at_layer_for_radiation long_name = ozone concentration layer units = kg kg-1 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [otspt] @@ -1570,28 +1563,28 @@ standard_name = air_pressure_at_interface_for_radiation_in_hPa long_name = air pressure at vertical interface for radiation calculation units = hPa - dimensions = (horizontal_loop_extent,adjusted_vertical_level_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_level_dimension_for_radiation) type = real kind = kind_phys [plyr] standard_name = air_pressure_at_layer_for_radiation_in_hPa long_name = air pressure at vertical layer for radiation calculation units = hPa - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [prnum] standard_name = prandtl_number long_name = turbulent Prandtl number units = none - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [q2mp] standard_name = specific_humidity_at_2m_from_noahmp long_name = 2 meter specific humidity from noahmp units = kg kg-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys active = (control_for_land_surface_scheme == identifier_for_noahmp_land_surface_scheme) @@ -1599,7 +1592,7 @@ standard_name = local_graupel_mixing_ratio long_name = ratio of mass of graupel to mass of dry air plus vapor (without condensates) local to physics units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) 
type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -1607,7 +1600,7 @@ standard_name = mass_fraction_of_convective_cloud_ice long_name = mass fraction of convective cloud ice water units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -1615,7 +1608,7 @@ standard_name = mass_fraction_of_convective_cloud_liquid_water long_name = mass fraction of convective cloud liquid water units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -1623,14 +1616,14 @@ standard_name = water_vapor_specific_humidity_at_layer_for_radiation long_name = specific humidity layer units = kg kg-1 - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [qrn] standard_name = local_rain_water_mixing_ratio long_name = ratio of mass of rain water to mass of dry air plus vapor (without condensates) local to physics units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -1638,7 +1631,7 @@ standard_name = local_snow_water_mixing_ratio long_name = ratio of mass of snow water to mass of dry air plus vapor (without condensates) local to physics units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + 
dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -1646,28 +1639,28 @@ standard_name = lwe_thickness_of_explicit_precipitation_amount long_name = explicit precipitation (rain, ice, snow, graupel, ...) on physics timestep units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [qss_water] standard_name = surface_specific_humidity_over_water long_name = surface air saturation specific humidity over water units = kg kg-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [qss_land] standard_name = surface_specific_humidity_over_land long_name = surface air saturation specific humidity over land units = kg kg-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [qss_ice] standard_name = surface_specific_humidity_over_ice long_name = surface air saturation specific humidity over ice units = kg kg-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [fullradar_diag] @@ -1687,28 +1680,28 @@ standard_name = lwe_thickness_of_deep_convective_precipitation_amount long_name = deep convective rainfall amount on physics timestep units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [raincs] standard_name = lwe_thickness_of_shallow_convective_precipitation_amount long_name = shallow convective rainfall amount on physics timestep units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [rainmcadj] standard_name = lwe_thickness_of_moist_convective_adj_precipitation_amount long_name = adjusted moist convective rainfall amount on physics timestep units = m - dimensions = (horizontal_loop_extent) + 
dimensions = (horizontal_dimension) type = real kind = kind_phys [rainmp] standard_name = lwe_thickness_of_explicit_rain_amount long_name = explicit rain on physics timestep units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_gfdl_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_thompson_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_nssl_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_tempo_microphysics_scheme) @@ -1716,35 +1709,35 @@ standard_name = tendency_of_rain_water_mixing_ratio_due_to_microphysics long_name = tendency of rain water mixing ratio due to microphysics units = kg kg-1 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [rb] standard_name = bulk_richardson_number_at_lowest_model_level long_name = bulk Richardson number at the surface units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [rb_water] standard_name = bulk_richardson_number_at_lowest_model_level_over_water long_name = bulk Richardson number at the surface over water units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [rb_land] standard_name = bulk_richardson_number_at_lowest_model_level_over_land long_name = bulk Richardson number at the surface over land units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [rb_ice] standard_name = bulk_richardson_number_at_lowest_model_level_over_ice long_name = bulk Richardson number at the surface over ice units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys 
[max_hourly_reset] @@ -1763,181 +1756,181 @@ standard_name = critical_relative_humidity long_name = critical relative humidity units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [rho1] standard_name = air_density_at_lowest_model_layer long_name = air density at lowest model layer units = kg m-3 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [runoff] standard_name = surface_runoff_flux long_name = surface runoff flux units = kg m-2 s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [save_q(:,:,index_of_ozone_mixing_ratio_in_tracer_concentration_array)] standard_name = ozone_mixing_ratio_save long_name = ozone mixing ratio before entering a physics scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_q(:,:,index_of_turbulent_kinetic_energy_in_tracer_concentration_array)] standard_name = turbulent_kinetic_energy_save long_name = turbulent kinetic energy before entering a physics scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_q(:,:,index_of_cloud_liquid_water_mixing_ratio_in_tracer_concentration_array)] standard_name = cloud_condensed_water_mixing_ratio_save long_name = ratio of mass of cloud water to mass of dry air plus vapor (without condensates) before entering a physics scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_q(:,:,index_of_snow_mixing_ratio_in_tracer_concentration_array)] standard_name = snow_mixing_ratio_save 
long_name = ratio of mass of snow water to mass of dry air plus vapor (without condensates) before entering a physics scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_q(:,:,index_of_cloud_ice_mixing_ratio_in_tracer_concentration_array)] standard_name = ice_water_mixing_ratio_save long_name = cloud ice water mixing ratio before entering a physics scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_q(:,:,index_of_specific_humidity_in_tracer_concentration_array)] standard_name = water_vapor_specific_humidity_save long_name = water vapor specific humidity before entering a physics scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_q(:,:,index_of_mass_number_concentration_of_cloud_droplets_in_tracer_concentration_array)] standard_name = liquid_cloud_number_concentration_save long_name = liquid cloud number concentration before entering a physics scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_q(:,:,index_of_mass_number_concentration_of_cloud_ice_in_tracer_concentration_array)] standard_name = ice_cloud_number_concentration_save long_name = ice cloud number concentration before entering a physics scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_q] standard_name = tracer_concentration_save long_name = tracer concentration before entering a physics scheme units = kg kg-1 - dimensions = 
(horizontal_loop_extent,vertical_layer_dimension,number_of_tracers) + dimensions = (horizontal_dimension,vertical_layer_dimension,number_of_tracers) type = real kind = kind_phys [save_t] standard_name = air_temperature_save long_name = air temperature before entering a physics scheme units = K - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_tcp] standard_name = air_temperature_save_from_convective_parameterization long_name = air temperature after cumulus parameterization units = K - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_u] standard_name = x_wind_save long_name = x-wind before entering a physics scheme units = m s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [save_v] standard_name = y_wind_save long_name = y-wind before entering a physics scheme units = m s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [scmpsw] standard_name = components_of_surface_downward_shortwave_fluxes long_name = derived type for special components of surface downward shortwave fluxes units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = cmpfsw_type [sfcalb] standard_name = surface_albedo_components long_name = surface albedo IR/UV/VIS components units = frac - dimensions = (horizontal_loop_extent,number_of_components_for_surface_albedo) + dimensions = (horizontal_dimension,number_of_components_for_surface_albedo) type = real kind = kind_phys [sfcalb(:,1)] standard_name = surface_albedo_due_to_near_IR_direct long_name = surface albedo due to near IR direct beam units = frac - dimensions = 
(horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [sfcalb(:,2)] standard_name = surface_albedo_due_to_near_IR_diffused long_name = surface albedo due to near IR diffused beam units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [sfcalb(:,3)] standard_name = surface_albedo_due_to_UV_and_VIS_direct long_name = surface albedo due to UV+VIS direct beam units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [sfcalb(:,4)] standard_name = surface_albedo_due_to_UV_and_VIS_diffused long_name = surface albedo due to UV+VIS diffused beam units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [sigma] standard_name = slope_of_subgrid_orography long_name = slope of subgrid height_above_mean_sea_level units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [sigmaf] standard_name = bounded_vegetation_area_fraction long_name = areal fractional cover of green vegetation bounded on the bottom units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [sigmafrac] standard_name = convective_updraft_area_fraction long_name = convective updraft area fraction units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [sigmatot] standard_name = convective_updraft_area_fraction_at_model_interfaces long_name = convective updraft area fraction at model interfaces units = frac - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys [skip_macro] @@ -1950,21 +1943,21 @@ standard_name = surface_snow_area_fraction long_name 
= surface snow area fraction units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [snohf] standard_name = snow_freezing_rain_upward_latent_heat_flux long_name = latent heat flux due to snow and frz rain units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [snowmp] standard_name = lwe_thickness_of_snow_amount long_name = explicit snow fall on physics timestep units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_gfdl_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_thompson_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_nssl_microphysics_scheme .or. control_for_microphysics_scheme == identifier_for_tempo_microphysics_scheme) @@ -1972,42 +1965,42 @@ standard_name = surface_snow_melt long_name = snow melt during timestep units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [stress] standard_name = surface_wind_stress long_name = surface wind stress units = m2 s-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [stress_water] standard_name = surface_wind_stress_over_water long_name = surface wind stress over water units = m2 s-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [stress_land] standard_name = surface_wind_stress_over_land long_name = surface wind stress over land units = m2 s-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [stress_ice] standard_name = surface_wind_stress_over_ice long_name = surface wind stress over ice units = m2 s-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) 
type = real kind = kind_phys [t2mmp] standard_name = temperature_at_2m_from_noahmp long_name = 2 meter temperature from noahmp units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys active = (control_for_land_surface_scheme == identifier_for_noahmp_land_surface_scheme) @@ -2015,42 +2008,42 @@ standard_name = angle_from_east_of_maximum_subgrid_orographic_variations long_name = angle with_respect to east of maximum subgrid orographic variations units = degree - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tlvl] standard_name = air_temperature_at_interface_for_radiation long_name = air temperature at vertical interface for radiation calculation units = K - dimensions = (horizontal_loop_extent,adjusted_vertical_level_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_level_dimension_for_radiation) type = real kind = kind_phys [tlyr] standard_name = air_temperature_at_layer_for_radiation long_name = air temperature at vertical layer for radiation calculation units = K - dimensions = (horizontal_loop_extent,adjusted_vertical_layer_dimension_for_radiation) + dimensions = (horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation) type = real kind = kind_phys [tprcp_water] standard_name = nonnegative_lwe_thickness_of_precipitation_amount_on_dynamics_timestep_over_water long_name = total precipitation amount in each time step over water units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tprcp_land] standard_name = nonnegative_lwe_thickness_of_precipitation_amount_on_dynamics_timestep_over_land long_name = total precipitation amount in each time step over land units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tprcp_ice] standard_name = 
nonnegative_lwe_thickness_of_precipitation_amount_on_dynamics_timestep_over_ice long_name = total precipitation amount in each time step over ice units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tracers_start_index] @@ -2075,49 +2068,49 @@ standard_name = surface_skin_temperature_for_nsst long_name = ocean surface skin temperature units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tsfa] standard_name = surface_air_temperature_for_radiation long_name = lowest model layer air temperature for radiation units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tsfc_water] standard_name = surface_skin_temperature_over_water long_name = surface skin temperature over water units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tsfg] standard_name = surface_ground_temperature_for_radiation long_name = surface ground temperature for radiation units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tsurf_water] standard_name = surface_skin_temperature_after_iteration_over_water long_name = surface skin temperature after iteration over water units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tsurf_land] standard_name = surface_skin_temperature_after_iteration_over_land long_name = surface skin temperature after iteration over land units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tsurf_ice] standard_name = surface_skin_temperature_after_iteration_over_ice long_name = surface skin temperature after iteration over ice units = K - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = 
kind_phys [tracers_water] @@ -2130,28 +2123,28 @@ standard_name = surface_friction_velocity_over_water long_name = surface friction velocity over water units = m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [uustar_land] standard_name = surface_friction_velocity_over_land long_name = surface friction velocity over land units = m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [uustar_ice] standard_name = surface_friction_velocity_over_ice long_name = surface friction velocity over ice units = m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [vdftra] standard_name = vertically_diffused_tracer_concentration long_name = tracer concentration diffused by PBL scheme units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension,number_of_vertical_diffusion_tracers) + dimensions = (horizontal_dimension,vertical_layer_dimension,number_of_vertical_diffusion_tracers) type = real kind = kind_phys [lndp_vgf] @@ -2165,14 +2158,14 @@ standard_name = perturbation_of_vegetation_fraction long_name = perturbation of vegetation fraction units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [w_upi] standard_name = vertical_velocity_for_updraft long_name = vertical velocity for updraft units = m s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (control_for_microphysics_scheme == identifier_for_morrison_gettelman_microphysics_scheme) @@ -2180,105 +2173,105 @@ standard_name = maximum_updraft_velocity_at_cloud_base long_name = maximum updraft velocity at cloud base units = m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [wind] 
standard_name = wind_speed_at_lowest_model_layer long_name = wind speed at lowest model level units = m s-1 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [work1] standard_name = grid_size_related_coefficient_used_in_scale_sensitive_schemes long_name = grid size related coefficient used in scale-sensitive schemes units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [work2] standard_name = grid_size_related_coefficient_used_in_scale_sensitive_schemes_complement long_name = complement to work1 units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [work3] standard_name = ratio_of_exner_function_between_midlayer_and_interface_at_lowest_model_layer long_name = Exner function ratio bt midlayer and interface at 1st layer units = ratio - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [xcosz] standard_name = instantaneous_cosine_of_zenith_angle long_name = cosine of zenith angle at current time units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [xlai1d] standard_name = perturbation_of_leaf_area_index long_name = perturbation of leaf area index units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [xmu] standard_name = zenith_angle_temporal_adjustment_factor_for_shortwave_fluxes long_name = zenith angle temporal adjustment factor for shortwave units = none - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [z01d] standard_name = perturbation_of_momentum_roughness_length long_name = perturbation of momentum roughness length units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys 
[ztmax_water] standard_name = bounded_surface_roughness_length_for_heat_over_water long_name = bounded surface roughness length for heat over water units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ztmax_land] standard_name = bounded_surface_roughness_length_for_heat_over_land long_name = bounded surface roughness length for heat over land units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [ztmax_ice] standard_name = bounded_surface_roughness_length_for_heat_over_ice long_name = bounded surface roughness length for heat over ice units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [zt1d] standard_name = perturbation_of_heat_to_momentum_roughness_length_ratio long_name = perturbation of heat to momentum roughness length ratio units = frac - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [zmtb] standard_name = height_of_mountain_blocking long_name = height of mountain blocking drag units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dudt_ngw] standard_name = tendency_of_x_wind_due_to_nonorographic_gravity_wave_drag long_name = zonal wind tendency due to non-stationary GWs units = m s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_unified_gravity_wave_physics_diagnostics .or. flag_for_ugwp_version_0 .or. flag_for_ugwp_version_0_nonorographic_gwd .or. 
flag_for_ugwp_version_1) @@ -2286,7 +2279,7 @@ standard_name = tendency_of_y_wind_due_to_nonorographic_gravity_wave_drag long_name = meridional wind tendency due to non-stationary GWs units = m s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_unified_gravity_wave_physics_diagnostics .or. flag_for_ugwp_version_0 .or. flag_for_ugwp_version_0_nonorographic_gwd .or. flag_for_ugwp_version_1) @@ -2294,7 +2287,7 @@ standard_name = tendency_of_air_temperature_due_to_nonorographic_gravity_wave_drag long_name = air temperature tendency due to non-stationary GWs units = K s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_unified_gravity_wave_physics_diagnostics .or. flag_for_ugwp_version_0 .or. flag_for_ugwp_version_0_nonorographic_gwd .or. flag_for_ugwp_version_1) @@ -2302,7 +2295,7 @@ standard_name = atmosphere_momentum_diffusivity_due_to_nonorographic_gravity_wave_drag long_name = eddy mixing due to non-stationary GWs units = m2 s-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_unified_gravity_wave_physics_diagnostics .or. flag_for_ugwp_version_0 .or. flag_for_ugwp_version_0_nonorographic_gwd .or. 
flag_for_ugwp_version_1) @@ -2310,77 +2303,77 @@ standard_name = height_of_low_level_wave_breaking long_name = height of low level wave breaking units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [zogw] standard_name = height_of_launch_level_of_orographic_gravity_wave long_name = height of launch level of orographic gravity wave units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [zngw] standard_name = height_of_launch_level_of_nonorographic_gravity_waves long_name = height of launch level of non-stationary GWs units = m - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tau_tofd] standard_name = instantaneous_momentum_flux_due_to_turbulent_orographic_form_drag long_name = instantaneous momentum flux due to TOFD units = Pa - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tau_mtb] standard_name = instantaneous_momentum_flux_due_to_mountain_blocking_drag long_name = instantaneous momentum flux due to mountain blocking drag units = Pa - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tau_ogw] standard_name = instantaneous_momentum_flux_due_to_orographic_gravity_wave_drag long_name = instantaneous momentum flux due to orographic gravity wave drag units = Pa - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tau_oss] standard_name = momentum_flux_due_to_subgrid_scale_orographic_gravity_wave_drag long_name = momentum flux or stress due to SSO including OBL-OSS-OFD units = Pa - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [tau_ngw] standard_name = instantaneous_momentum_flux_due_to_nonstationary_gravity_wave long_name = instantaneous momentum flux due to 
nonstationary gravity waves units = Pa - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [dudt_mtb] standard_name = instantaneous_change_in_x_wind_due_to_mountain_blocking_drag long_name = instantaneous change in x wind due to mountain blocking drag units = m s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [dudt_tms] standard_name = tendency_of_x_wind_due_to_turbulent_orographic_form_drag long_name = instantaneous change in x wind due to TOFD units = m s-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys [qs_lay] standard_name = saturation_vapor_pressure long_name = saturation vapor pressure units = Pa - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2388,7 +2381,7 @@ standard_name = water_vapor_mixing_ratio long_name = water vaport mixing ratio units = kg kg-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2396,7 +2389,7 @@ standard_name = air_pressure_at_layer_for_RRTMGP long_name = air pressure layer units = Pa - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2404,7 +2397,7 @@ standard_name = air_pressure_at_interface_for_RRTMGP long_name = air pressure level units = Pa - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = 
kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2412,7 +2405,7 @@ standard_name = air_temperature_at_layer_for_RRTMGP long_name = air temperature layer units = K - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2420,7 +2413,7 @@ standard_name = air_temperature_at_interface_for_RRTMGP long_name = air temperature layer units = K - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2428,7 +2421,7 @@ standard_name = virtual_temperature long_name = layer virtual temperature units = K - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2436,7 +2429,7 @@ standard_name = relative_humidity long_name = layer relative humidity units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2444,7 +2437,7 @@ standard_name = layer_thickness long_name = layer_thickness units = m - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2452,7 +2445,7 @@ standard_name = layer_thickness_from_layer_center long_name = layer_thickness units = m - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2460,7 +2453,7 @@ standard_name = layer_thickness_in_Pa long_name = 
layer_thickness_in_Pa units = Pa - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2468,7 +2461,7 @@ standard_name = chemical_tracers long_name = chemical tracers units = g g-1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension,number_of_tracers) + dimensions = (horizontal_dimension,vertical_layer_dimension,number_of_tracers) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2476,7 +2469,7 @@ standard_name = cloud_overlap_param long_name = cloud overlap parameter for RRTMGP (but not for RRTMG) units = km - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2484,7 +2477,7 @@ standard_name = precip_overlap_param long_name = precipitation overlap parameter units = km - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2492,7 +2485,7 @@ standard_name = RRTMGP_lw_flux_profile_upward_allsky long_name = RRTMGP upward longwave all-sky flux profile units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2500,7 +2493,7 @@ standard_name = RRTMGP_lw_flux_profile_downward_allsky long_name = RRTMGP downward longwave all-sky flux profile units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2508,7 +2501,7 @@ standard_name = 
RRTMGP_lw_flux_profile_upward_clrsky long_name = RRTMGP upward longwave clr-sky flux profile units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2516,7 +2509,7 @@ standard_name = RRTMGP_lw_flux_profile_downward_clrsky long_name = RRTMGP downward longwave clr-sky flux profile units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2524,7 +2517,7 @@ standard_name = RRTMGP_sw_flux_profile_upward_allsky long_name = RRTMGP upward shortwave all-sky flux profile units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2532,7 +2525,7 @@ standard_name = RRTMGP_sw_flux_profile_downward_allsky long_name = RRTMGP downward shortwave all-sky flux profile units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2540,7 +2533,7 @@ standard_name = RRTMGP_sw_flux_profile_upward_clrsky long_name = RRTMGP upward shortwave clr-sky flux profile units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2548,7 +2541,7 @@ standard_name = RRTMGP_sw_flux_profile_downward_clrsky long_name = RRTMGP downward shortwave clr-sky flux profile units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = 
(horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2556,21 +2549,21 @@ standard_name = RRTMGP_lw_fluxes long_name = lw fluxes total sky / csk and up / down at levels units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = proflw_type active = (flag_for_rrtmgp_radiation_scheme) [flxprf_sw] standard_name = RRTMGP_sw_fluxes long_name = sw fluxes total sky / csk and up / down at levels units = W m-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = profsw_type active = (flag_for_rrtmgp_radiation_scheme) [aerosolslw] standard_name = RRTMGP_aerosol_optical_properties_for_longwave_bands_01_16 long_name = aerosol optical properties for longwave bands 01-16 units = mixed - dimensions = (horizontal_loop_extent,vertical_layer_dimension, number_of_longwave_bands,number_of_aerosol_output_fields_for_longwave_radiation) + dimensions = (horizontal_dimension,vertical_layer_dimension, number_of_longwave_bands,number_of_aerosol_output_fields_for_longwave_radiation) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2578,7 +2571,7 @@ standard_name = RRTMGP_aerosol_optical_depth_for_longwave_bands_01_16 long_name = aerosol optical depth for longwave bands 01-16 units = none - dimensions = (horizontal_loop_extent,vertical_layer_dimension, number_of_longwave_bands) + dimensions = (horizontal_dimension,vertical_layer_dimension, number_of_longwave_bands) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2586,7 +2579,7 @@ standard_name = RRTMGP_aerosol_single_scattering_albedo_for_longwave_bands_01_16 long_name = aerosol single scattering albedo for longwave bands 01-16 units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension, number_of_longwave_bands) 
+ dimensions = (horizontal_dimension,vertical_layer_dimension, number_of_longwave_bands) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2594,7 +2587,7 @@ standard_name = RRTMGP_aerosol_asymmetry_parameter_for_longwave_bands_01_16 long_name = aerosol asymmetry parameter for longwave bands 01-16 units = none - dimensions = (horizontal_loop_extent,vertical_layer_dimension, number_of_longwave_bands) + dimensions = (horizontal_dimension,vertical_layer_dimension, number_of_longwave_bands) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2602,7 +2595,7 @@ standard_name = RRTMGP_aerosol_optical_properties_for_shortwave_bands_01_16 long_name = aerosol optical properties for shortwave bands 01-16 units = mixed - dimensions = (horizontal_loop_extent,vertical_layer_dimension, number_of_shortwave_bands, number_of_aerosol_output_fields_for_shortwave_radiation) + dimensions = (horizontal_dimension,vertical_layer_dimension, number_of_shortwave_bands, number_of_aerosol_output_fields_for_shortwave_radiation) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2610,7 +2603,7 @@ standard_name = RRTMGP_aerosol_optical_depth_for_shortwave_bands_01_16 long_name = aerosol optical depth for shortwave bands 01-16 units = none - dimensions = (horizontal_loop_extent,vertical_layer_dimension, number_of_shortwave_bands) + dimensions = (horizontal_dimension,vertical_layer_dimension, number_of_shortwave_bands) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2618,7 +2611,7 @@ standard_name = RRTMGP_aerosol_single_scattering_albedo_for_shortwave_bands_01_16 long_name = aerosol single scattering albedo for shortwave bands 01-16 units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension, number_of_shortwave_bands) + dimensions = (horizontal_dimension,vertical_layer_dimension, number_of_shortwave_bands) type = real kind = kind_phys active = 
(flag_for_rrtmgp_radiation_scheme) @@ -2626,7 +2619,7 @@ standard_name = RRTMGP_aerosol_asymmetry_parameter_for_shortwave_bands_01_16 long_name = aerosol asymmetry parameter for shortwave bands 01-16 units = none - dimensions = (horizontal_loop_extent,vertical_layer_dimension, number_of_shortwave_bands) + dimensions = (horizontal_dimension,vertical_layer_dimension, number_of_shortwave_bands) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2634,7 +2627,7 @@ standard_name = precipitation_fraction_by_layer long_name = precipitation fraction in each layer units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2642,7 +2635,7 @@ standard_name = convective_cloud_overlap_param long_name = convective cloud overlap parameter units = km - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2650,7 +2643,7 @@ standard_name = convective_cloud_fraction_for_RRTMGP long_name = layer convective cloud fraction units = frac - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2658,7 +2651,7 @@ standard_name = convective_cloud_liquid_water_path long_name = layer convective cloud liquid water path units = g m-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2666,7 +2659,7 @@ standard_name = convective_cloud_ice_water_path long_name = layer convective cloud ice water path units = g m-2 - dimensions = 
(horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2674,7 +2667,7 @@ standard_name = mean_effective_radius_for_liquid_convective_cloud long_name = mean effective radius for liquid convective cloud units = um - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2682,7 +2675,7 @@ standard_name = mean_effective_radius_for_ice_convective_cloud long_name = mean effective radius for ice convective cloud units = um - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2690,7 +2683,7 @@ standard_name = MYNN_SGS_cloud_liquid_water_path long_name = layer convective cloud liquid water path units = g m-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2698,7 +2691,7 @@ standard_name = MYNN_SGS_cloud_ice_water_path long_name = layer convective cloud ice water path units = g m-2 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2706,7 +2699,7 @@ standard_name = mean_effective_radius_for_liquid_MYNN_SGS_cloud long_name = mean effective radius for liquid MYNN_SGS cloud units = um - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2714,7 +2707,7 @@ standard_name = 
mean_effective_radius_for_ice_MYNN_SGS_cloud long_name = mean effective radius for ice MYNN_SGS cloud units = um - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2722,7 +2715,7 @@ standard_name = volume_mixing_ratio_for_o2 long_name = molar mixing ratio of o2 in with respect to dry air units = 1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2730,7 +2723,7 @@ standard_name = volume_mixing_ratio_for_h2o long_name = molar mixing ratio of h2o in with respect to dry air units = 1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2738,7 +2731,7 @@ standard_name = volume_mixing_ratio_for_o3 long_name = molar mixing ratio of o3 in with respect to dry air units = 1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2746,7 +2739,7 @@ standard_name = volume_mixing_ratio_for_ch4 long_name = molar mixing ratio of ch4 in with respect to dry air units = 1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2754,7 +2747,7 @@ standard_name = volume_mixing_ratio_for_n2o long_name = molar mixing ratio of n2o in with respect to dry air units = 1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = 
(flag_for_rrtmgp_radiation_scheme) @@ -2762,7 +2755,7 @@ standard_name = volume_mixing_ratio_for_co2 long_name = molar mixing ratio of co2 in with respect to dry air units = 1 - dimensions = (horizontal_loop_extent,vertical_layer_dimension) + dimensions = (horizontal_dimension,vertical_layer_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2770,7 +2763,7 @@ standard_name = surface_emissivity_in_each_RRTMGP_LW_band long_name = surface emissivity in each RRTMGP LW band units = none - dimensions = (number_of_longwave_bands,horizontal_loop_extent) + dimensions = (number_of_longwave_bands,horizontal_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2778,7 +2771,7 @@ standard_name = secant_of_diffusivity_angle_each_RRTMGP_LW_band long_name = secant of diffusivity angle in each RRTMGP LW band units = none - dimensions = (number_of_longwave_bands,horizontal_loop_extent) + dimensions = (number_of_longwave_bands,horizontal_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2786,7 +2779,7 @@ standard_name = surface_albedo_nearIR_direct long_name = near-IR (direct) surface albedo (sfc_alb_nir_dir) units = none - dimensions = (number_of_shortwave_bands,horizontal_loop_extent) + dimensions = (number_of_shortwave_bands,horizontal_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2794,7 +2787,7 @@ standard_name = surface_albedo_nearIR_diffuse long_name = near-IR (diffuse) surface albedo (sfc_alb_nir_dif) units = none - dimensions = (number_of_shortwave_bands,horizontal_loop_extent) + dimensions = (number_of_shortwave_bands,horizontal_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2802,7 +2795,7 @@ standard_name = surface_albedo_uvvis_direct long_name = UVVIS (direct) surface albedo (sfc_alb_uvvis_dir) units = none - dimensions = (number_of_shortwave_bands,horizontal_loop_extent) + dimensions 
= (number_of_shortwave_bands,horizontal_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2810,7 +2803,7 @@ standard_name = surface_albedo_uvvis_diffuse long_name = UVVIS (diffuse) surface albedo (sfc_alb_uvvis_dif) units = none - dimensions = (number_of_shortwave_bands,horizontal_loop_extent) + dimensions = (number_of_shortwave_bands,horizontal_dimension) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2818,7 +2811,7 @@ standard_name = toa_incident_lw_flux_by_spectral_point long_name = TOA longwave incident flux at each spectral points units = W m-2 - dimensions = (horizontal_loop_extent,number_of_longwave_spectral_points) + dimensions = (horizontal_dimension,number_of_longwave_spectral_points) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2826,7 +2819,7 @@ standard_name = toa_incident_sw_flux_by_spectral_point long_name = TOA shortwave incident flux at each spectral points units = W m-2 - dimensions = (horizontal_loop_extent,number_of_shortwave_spectral_points) + dimensions = (horizontal_dimension,number_of_shortwave_spectral_points) type = real kind = kind_phys active = (flag_for_rrtmgp_radiation_scheme) @@ -2840,7 +2833,7 @@ standard_name = vertical_turbulent_kinetic_energy_at_interface long_name = vertical turbulent kinetic energy at model layer interfaces units = m2 s-2 - dimensions = (horizontal_loop_extent,vertical_interface_dimension) + dimensions = (horizontal_dimension,vertical_interface_dimension) type = real kind = kind_phys @@ -2864,14 +2857,14 @@ standard_name = cappa_moist_gas_constant_at_Lagrangian_surface long_name = cappa(i,j,k) = rdgas / ( rdgas + cvm(i)/(1.+r_vir*q(i,j,k,sphum)) ) units = none - dimensions = (starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain,1:vertical_dimension_for_cappa_at_Lagrangian_surface) + dimensions = 
(starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain,ccpp_constant_one:vertical_dimension_for_cappa_at_Lagrangian_surface) type = real kind = kind_dyn [dtdt] standard_name = tendency_of_air_temperature_at_Lagrangian_surface long_name = air temperature tendency due to fast physics at Lagrangian surface units = K s-1 - dimensions = (starting_x_direction_index:ending_x_direction_index,starting_y_direction_index:ending_y_direction_index,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index:ending_x_direction_index,starting_y_direction_index:ending_y_direction_index,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [do_qa] @@ -2922,7 +2915,7 @@ standard_name = atmosphere_energy_content_at_Lagrangian_surface long_name = atmosphere total energy at Lagrangian surface units = J m-2 - dimensions = (starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [zvir] @@ -3038,14 +3031,14 @@ standard_name = pressure_thickness_at_Lagrangian_surface long_name = pressure thickness at Lagrangian surface units = Pa - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [delz] standard_name = thickness_at_Lagrangian_surface long_name = thickness at 
Lagrangian_surface units = m - dimensions = (starting_x_direction_index_alloc2:ending_x_direction_index_alloc2,starting_y_direction_index_alloc2:ending_y_direction_index_alloc2,1:vertical_dimension_for_thickness_at_Lagrangian_surface) + dimensions = (starting_x_direction_index_alloc2:ending_x_direction_index_alloc2,starting_y_direction_index_alloc2:ending_y_direction_index_alloc2,ccpp_constant_one:vertical_dimension_for_thickness_at_Lagrangian_surface) type = real kind = kind_dyn [area] @@ -3095,7 +3088,7 @@ standard_name = log_pressure_at_Lagrangian_surface long_name = logarithm of pressure at Lagrangian surface units = Pa - dimensions = (starting_x_direction_index_alloc2:ending_x_direction_index_alloc2,1:vertical_dimension_for_fast_physics_plus_one,starting_y_direction_index_alloc2:ending_y_direction_index_alloc2) + dimensions = (starting_x_direction_index_alloc2:ending_x_direction_index_alloc2,ccpp_constant_one:vertical_dimension_for_fast_physics_plus_one,starting_y_direction_index_alloc2:ending_y_direction_index_alloc2) type = real kind = kind_dyn [phis] @@ -3109,77 +3102,77 @@ standard_name = finite_volume_mean_edge_pressure_raised_to_the_power_of_kappa long_name = finite-volume mean edge pressure raised to the power of kappa units = 1 - dimensions = (starting_x_direction_index_alloc2:ending_x_direction_index_alloc2,starting_y_direction_index_alloc2:ending_y_direction_index_alloc2,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_alloc2:ending_x_direction_index_alloc2,starting_y_direction_index_alloc2:ending_y_direction_index_alloc2,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [pt] standard_name = virtual_temperature_at_Lagrangian_surface long_name = virtual temperature at Lagrangian surface units = K - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + 
dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [qvi] standard_name = gas_tracers_for_multi_gas_physics_at_Lagrangian_surface long_name = gas tracers for multi gas physics at Lagrangian surface units = kg kg-1 - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics,1:number_of_gases_for_multi_gases_physics) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics,ccpp_constant_one:number_of_gases_for_multi_gases_physics) type = real kind = kind_dyn [qv] standard_name = water_vapor_specific_humidity_at_Lagrangian_surface long_name = water vapor specific humidity updated by fast physics at Lagrangian surface units = kg kg-1 - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [ql] standard_name = cloud_liquid_water_specific_humidity_at_Lagrangian_surface long_name = cloud liquid water specific humidity updated by fast physics at Lagrangian surface units = kg kg-1 - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + dimensions = 
(starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [qi] standard_name = cloud_ice_specific_humidity_at_Lagrangian_surface long_name = cloud ice specific humidity updated by fast physics at Lagrangian surface units = kg kg-1 - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [qr] standard_name = cloud_rain_specific_humidity_at_Lagrangian_surface long_name = cloud rain specific humidity updated by fast physics at Lagrangian surface units = kg kg-1 - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [qs] standard_name = cloud_snow_specific_humidity_at_Lagrangian_surface long_name = cloud snow specific humidity updated by fast physics at Lagrangian surface units = kg kg-1 - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [qg] 
standard_name = cloud_graupel_specific_humidity_at_Lagrangian_surface long_name = cloud graupel specific humidity updated by fast physics at Lagrangian surface units = kg kg-1 - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [qc] standard_name = cloud_fraction_at_Lagrangian_surface long_name = cloud fraction at Lagrangian surface units = none - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_fast_physics) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_fast_physics) type = real kind = kind_dyn [q_con] standard_name = cloud_condensed_water_specific_humidity_at_Lagrangian_surface long_name = cloud condensed water specific humidity updated by fast physics at Lagrangian surface units = kg kg-1 - dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,1:vertical_dimension_for_condensed_water_at_Lagrangian_surface) + dimensions = (starting_x_direction_index_alloc1:ending_x_direction_index_alloc1,starting_y_direction_index_alloc1:ending_y_direction_index_alloc1,ccpp_constant_one:vertical_dimension_for_condensed_water_at_Lagrangian_surface) type = real kind = kind_dyn [nthreads] @@ -3210,14 +3203,14 @@ standard_name = gas_constants_for_multi_gases_physics long_name = gas constants for multi gases physics units = J kg-1 K-1 - dimensions = 
(0:number_of_gases_for_multi_gases_physics) + dimensions = (constant_zero:number_of_gases_for_multi_gases_physics) type = real kind = kind_dyn [cpilist] standard_name = specific_heat_capacities_for_multi_gases_physics long_name = specific heat capacities for multi gases physics units = J kg-1 K-1 - dimensions = (0:number_of_gases_for_multi_gases_physics) + dimensions = (constant_zero:number_of_gases_for_multi_gases_physics) type = real kind = kind_dyn [mpirank] diff --git a/ccpp/data/GFS_typedefs.F90 b/ccpp/data/GFS_typedefs.F90 index a5febdbb28..e3071f27eb 100644 --- a/ccpp/data/GFS_typedefs.F90 +++ b/ccpp/data/GFS_typedefs.F90 @@ -69,7 +69,7 @@ module GFS_typedefs ! LTP=0: no extra top layer integer, parameter :: LTP = 0 ! no extra top layer !integer, parameter :: LTP = 1 ! add an extra top layer - + integer, parameter :: con_zero = 0 !---------------- ! Data Containers !---------------- @@ -172,11 +172,11 @@ module GFS_typedefs real (kind=kind_phys), pointer :: vvl (:,:) => null() !< layer mean vertical velocity in pa/sec real (kind=kind_phys), pointer :: tgrs (:,:) => null() !< model layer mean temperature in k real (kind=kind_phys), pointer :: qgrs (:,:,:) => null() !< layer mean tracer concentration -!3D-SA-TKE +!SA-3D-TKE real (kind=kind_phys), pointer :: def_1 (:,:) => null() !< deformation real (kind=kind_phys), pointer :: def_2 (:,:) => null() !< deformation real (kind=kind_phys), pointer :: def_3 (:,:) => null() !< deformation -!3D-SA-TKE-end +!SA-3D-TKE-end ! dissipation estimate real (kind=kind_phys), pointer :: diss_est(:,:) => null() !< model layer mean temperature in k ! soil state variables - for soil SPPT - sfc-perts, mgehne @@ -778,6 +778,11 @@ module GFS_typedefs ! 
integer :: fire_aux_data_levels !< vertical levels of fire auxiliary data +!--- dycore control parameters + integer :: dycore_active !< Choice of dynamical core + integer :: dycore_fv3 = 1 !< Choice of FV3 dynamical core + integer :: dycore_mpas = 2 !< Choice of MPAS dynamical core + !--- coupling parameters logical :: cplflx !< default no cplflx collection logical :: cplice !< default no cplice collection (used together with cplflx) @@ -896,8 +901,8 @@ module GFS_typedefs integer :: rad_hr_units !< flag to control units of lw/sw heating rate !< 1: K day-1 - 2: K s-1 logical :: inc_minor_gas !< Include minor trace gases in RRTMG radiation calculation? - integer :: ipsd0 !< initial permutaion seed for mcica radiation - integer :: ipsdlim !< limit initial permutaion seed for mcica radiation + integer :: ipsd0 !< initial permutation seed for mcica radiation + integer :: ipsdlim !< limit initial permutation seed for mcica radiation logical :: lrseeds !< flag to use host-provided random seeds integer :: nrstreams !< number of random number streams in host-provided random seed array logical :: lextop !< flag for using an extra top layer for radiation @@ -915,9 +920,6 @@ module GFS_typedefs character(len=128) :: sw_file_clouds !< RRTMGP file containing coefficients used to compute clouds optical properties integer :: rrtmgp_nBandsSW !< Number of RRTMGP SW bands. integer :: rrtmgp_nGptsSW !< Number of RRTMGP SW spectral points. - logical :: doG_cldoptics !< Use legacy RRTMG cloud-optics? - logical :: doGP_cldoptics_PADE !< Use RRTMGP cloud-optics: PADE approximation? - logical :: doGP_cldoptics_LUT !< Use RRTMGP cloud-optics: LUTs? 
integer :: iovr_convcld !< Cloud-overlap assumption for convective-cloud integer :: rrtmgp_nrghice !< Number of ice-roughness categories integer :: rrtmgp_nGauss_ang !< Number of angles used in Gaussian quadrature @@ -1048,7 +1050,15 @@ module GFS_typedefs integer :: decfl !< deformed CFL factor type(ty_tempo_cfg) :: tempo_cfg !< Thompson MP configuration information. logical :: thompson_mp_is_init=.false. !< Local scheme initialization flag - + real(kind=kind_phys) :: nt_c_l !< prescribed cloud liquid water number concentration over land + real(kind=kind_phys) :: nt_c_o !< prescribed cloud liquid water number concentration over ocean + real(kind=kind_phys) :: av_i !< transition value of coefficient matching at crossover from cloud ice to snow + real(kind=kind_phys) :: xnc_max !< maximum mass number concentration of cloud liquid water particles in air used in deposition nucleation + real(kind=kind_phys) :: ssati_min !< minimum supersaturation over ice threshold for deposition nucleation + real(kind=kind_phys) :: Nt_i_max !< maximum threshold number concentration of cloud ice water crystals in air + real(kind=kind_phys) :: rr_min !< multiplicative tuning parameter for microphysical sedimentation minimum threshold + + !--- GFDL microphysical paramters logical :: lgfdlmprad !< flag for GFDL mp scheme and radiation consistency @@ -1190,6 +1200,8 @@ module GFS_typedefs logical :: hybedmf !< flag for hybrid edmf pbl scheme logical :: satmedmf !< flag for scale-aware TKE-based moist edmf !< vertical turbulent mixing scheme + logical :: tte_edmf !< flag for scale-aware TTE-based moist edmf + !< vertical turbulent mixing scheme logical :: shinhong !< flag for scale-aware Shinhong vertical turbulent mixing scheme logical :: do_ysu !< flag for YSU turbulent mixing scheme logical :: dspheat !< flag for tke dissipative heating @@ -1248,6 +1260,7 @@ module GFS_typedefs !< used in the GWD parameterization - 10 more added if !< GSL orographic drag scheme is used integer :: jcap !< 
number of spectral wave trancation used only by sascnv shalcnv + real(kind=kind_phys) :: cscale !< tunable parameter for saSAS convective cloud liquid real(kind=kind_phys) :: cs_parm(10) !< tunable parameters for Chikira-Sugiyama convection real(kind=kind_phys) :: flgmin(2) !< [in] ice fraction bounds real(kind=kind_phys) :: cgwf(2) !< multiplication factor for convective GWD @@ -1266,7 +1279,7 @@ module GFS_typedefs real(kind=kind_phys) :: psauras(2) !< [in] auto conversion coeff from ice to snow in ras real(kind=kind_phys) :: prauras(2) !< [in] auto conversion coeff from cloud to rain in ras real(kind=kind_phys) :: wminras(2) !< [in] water and ice minimum threshold for ras - + integer :: seed0 !< random seed for radiation real(kind=kind_phys) :: rbcr !< Critical Richardson Number in the PBL scheme @@ -1366,9 +1379,7 @@ module GFS_typedefs !< 6=areodynamical roughness over water with input 10-m wind !< 7=slightly decrease Cd for higher wind speed compare to 6 !--- air_sea_flux scheme - integer :: icplocn2atm !< air_sea flux options over ocean: - !< 0=no change - !< l=including ocean current in the computation of air_sea fluxes + logical :: use_oceanuv !< flag for including ocean current in the computation of air_sea fluxes !--- potential temperature definition in surface layer physics logical :: thsfc_loc !< flag for local vs. standard potential temperature @@ -1696,6 +1707,8 @@ module GFS_typedefs ! logical :: land_iau_do_stcsmc_adjustment ! 
real(kind=kind_phys) :: land_iau_min_T_increment + character(len=2) :: sfc_file_version = 'V1' !< version number of input surface file + contains procedure :: init => control_initialize procedure :: init_chemistry => control_chemistry_initialize @@ -1818,7 +1831,6 @@ module GFS_typedefs real (kind=kind_phys), pointer :: dku3d_h (:,:) => null() !< Horizontal eddy diffusitivity for momentum real (kind=kind_phys), pointer :: dku3d_e (:,:) => null() !< Eddy diffusitivity for momentum for tke - !--- dynamical forcing variables for Grell-Freitas convection real (kind=kind_phys), pointer :: forcet (:,:) => null() !< real (kind=kind_phys), pointer :: forceq (:,:) => null() !< @@ -1877,7 +1889,7 @@ module GFS_typedefs real (kind=kind_phys), pointer :: cv (:) => null() !< fraction of convective cloud ; phys real (kind=kind_phys), pointer :: cvt (:) => null() !< convective cloud top pressure in pa ; phys real (kind=kind_phys), pointer :: cvb (:) => null() !< convective cloud bottom pressure in pa ; phys, cnvc90 - + real (kind=kind_phys), pointer :: cnvw (:,:) => null() !< convective cloud fractions contains procedure :: create => cldprop_create !< allocate array data end type GFS_cldprop_type @@ -2310,21 +2322,21 @@ subroutine statein_create (Statein, Model) allocate (Statein%wgrs (IM,Model%levs)) endif allocate (Statein%qgrs (IM,Model%levs,Model%ntrac)) -!3D-SA-TKE +!SA-3D-TKE allocate (Statein%def_1 (IM,Model%levs)) allocate (Statein%def_2 (IM,Model%levs)) allocate (Statein%def_3 (IM,Model%levs)) -!3D-SA-TKE-end +!SA-3D-TKE-end Statein%qgrs = clear_val Statein%pgr = clear_val Statein%ugrs = clear_val Statein%vgrs = clear_val -!3D-SA-TKE +!SA-3D-TKE Statein%def_1 = clear_val Statein%def_2 = clear_val Statein%def_3 = clear_val -!3D-SA-TKE-end +!SA-3D-TKE-end if(Model%lightning_threat) then Statein%wgrs = clear_val @@ -3365,15 +3377,15 @@ end subroutine coupling_create !---------------------- ! 
GFS_control_type%init !---------------------- - subroutine control_initialize (Model, nlunit, fn_nml, me, master, & - logunit, isc, jsc, nx, ny, levs, & - cnx, cny, gnx, gny, dt_dycore, & + subroutine control_initialize (Model, nlunit, fn_nml, me, & + master, logunit, levs, dt_dycore, & dt_phys, iau_offset, idat, jdat, & nwat, tracer_names, tracer_types, & - input_nml_file, tile_num, blksz, & - ak, bk, restart, hydrostatic, & - communicator, ntasks, nthreads) - + input_nml_file, blksz, restart, & + communicator, ntasks, nthreads, & + tile_num, isc, jsc, nx, ny, cnx, & + cny, gnx, gny, ak, bk, hydrostatic) + !--- modules use physcons, only: con_rerth, con_pi use mersenne_twister, only: random_setseed, random_number @@ -3387,16 +3399,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & integer, intent(in) :: me integer, intent(in) :: master integer, intent(in) :: logunit - integer, intent(in) :: tile_num - integer, intent(in) :: isc - integer, intent(in) :: jsc - integer, intent(in) :: nx - integer, intent(in) :: ny integer, intent(in) :: levs - integer, intent(in) :: cnx - integer, intent(in) :: cny - integer, intent(in) :: gnx - integer, intent(in) :: gny real(kind=kind_phys), intent(in) :: dt_dycore real(kind=kind_phys), intent(in) :: dt_phys integer, intent(in) :: iau_offset @@ -3407,13 +3410,23 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & integer, intent(in) :: tracer_types(:) character(len=:), intent(in), dimension(:), pointer :: input_nml_file integer, intent(in) :: blksz(:) - real(kind=kind_phys), dimension(:), intent(in) :: ak - real(kind=kind_phys), dimension(:), intent(in) :: bk logical, intent(in) :: restart - logical, intent(in) :: hydrostatic type(MPI_Comm), intent(in) :: communicator integer, intent(in) :: ntasks integer, intent(in) :: nthreads + !--- optional variables (Dycore specific) + integer, optional, intent(in) :: tile_num + integer, optional, intent(in) :: isc + integer, optional, intent(in) :: jsc 
+ integer, optional, intent(in) :: nx + integer, optional, intent(in) :: ny + integer, optional, intent(in) :: cnx + integer, optional, intent(in) :: cny + integer, optional, intent(in) :: gnx + integer, optional, intent(in) :: gny + logical, optional, intent(in) :: hydrostatic + real(kind_phys), optional, dimension(:), intent(in) :: ak + real(kind_phys), optional, dimension(:), intent(in) :: bk !--- local variables integer :: i, j, n @@ -3545,8 +3558,8 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & logical :: swhtr = .true. !< flag to output sw heating rate (Radtend%swhc) integer :: rad_hr_units = 2 !< heating rate units are K s-1 logical :: inc_minor_gas = .true. !< Include minor trace gases in RRTMG radiation calculation - integer :: ipsd0 = 0 !< initial permutaion seed for mcica radiation - integer :: ipsdlim = 1e8 !< limit initial permutaion seed for mcica radiation + integer :: ipsd0 = 0 !< initial permutation seed for mcica radiation + integer :: ipsdlim = 1e8 !< limit initial permutation seed for mcica radiation logical :: lrseeds = .false. !< flag to use host-provided random seeds integer :: nrstreams = 2 !< number of random number streams in host-provided random seed array logical :: lextop = .false. !< flag for using an extra top layer for radiation @@ -3563,9 +3576,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & integer :: rrtmgp_nGptsSW = -999 !< Number of RRTMGP SW spectral points. # The RRTMGP spectral dimensions in the files integer :: rrtmgp_nBandsLW = -999 !< Number of RRTMGP LW bands. # need to be provided via namelsit. integer :: rrtmgp_nGptsLW = -999 !< Number of RRTMGP LW spectral points. # - logical :: doG_cldoptics = .false. !< Use legacy RRTMG cloud-optics? - logical :: doGP_cldoptics_PADE = .false. !< Use RRTMGP cloud-optics: PADE approximation? - logical :: doGP_cldoptics_LUT = .false. !< Use RRTMGP cloud-optics: LUTs? 
integer :: iovr_convcld = 1 !< Cloud-overlap assumption for convective-cloud (defaults to iovr if not set) integer :: rrtmgp_nrghice = 3 !< Number of ice-roughness categories integer :: rrtmgp_nGauss_ang = 1 !< Number of angles used in Gaussian quadrature @@ -3651,7 +3661,14 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & real(kind=kind_phys) :: dt_inner = -999.0 !< time step for the inner loop logical :: sedi_semi = .false. !< flag for semi Lagrangian sedi of rain integer :: decfl = 8 !< deformed CFL factor - + real(kind=kind_phys) :: nt_c_l = 150.e6 !< prescribed cloud liquid water number concentration over land + real(kind=kind_phys) :: nt_c_o = 50.e6 !< prescribed cloud liquid water number concentration over ocean + real(kind=kind_phys) :: av_i = -999.0 !< transition value of coefficient matching at crossover from cloud ice to snow + real(kind=kind_phys) :: xnc_max = 1000.e3 !< maximum mass number concentration of cloud liquid water particles in air used in deposition nucleation + real(kind=kind_phys) :: ssati_min = 0.15 !< minimum supersaturation over ice threshold for deposition nucleation + real(kind=kind_phys) :: Nt_i_max = 4999.e3 !< maximum threshold number concentration of cloud ice water crystals in air + real(kind=kind_phys) :: rr_min = 1000.0 !< multiplicative tuning parameter for microphysical sedimentation minimum threshold + !--- GFDL microphysical parameters logical :: lgfdlmprad = .false. !< flag for GFDLMP radiation interaction @@ -3783,6 +3800,8 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & logical :: hybedmf = .false. !< flag for hybrid edmf pbl scheme logical :: satmedmf = .false. !< flag for scale-aware TKE-based moist edmf !< vertical turbulent mixing scheme + logical :: tte_edmf = .false. !< flag for scale-aware TTE-based moist edmf + !< vertical turbulent mixing scheme logical :: shinhong = .false. !< flag for scale-aware Shinhong vertical turbulent mixing scheme logical :: do_ysu = .false. 
!< flag for YSU vertical turbulent mixing scheme logical :: dspheat = .false. !< flag for tke dissipative heating @@ -3854,7 +3873,8 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & integer :: nmtvr = 14 !< number of topographic variables such as variance etc !< used in the GWD parameterization integer :: jcap = 1 !< number of spectral wave trancation used only by sascnv shalcnv -! real(kind=kind_phys) :: cs_parm(10) = (/5.0,2.5,1.0e3,3.0e3,20.0,-999.,-999.,0.,0.,0./) + real :: cscale = 1 !< tunable parameter for convective cloud liquid (0-1) + ! real(kind=kind_phys) :: cs_parm(10) = (/5.0,2.5,1.0e3,3.0e3,20.0,-999.,-999.,0.,0.,0./) real(kind=kind_phys) :: cs_parm(10) = (/8.0,4.0,1.0e3,3.5e3,20.0,1.0,-999.,1.,0.6,0./) real(kind=kind_phys) :: flgmin(2) = (/0.180,0.220/) !< [in] ice fraction bounds real(kind=kind_phys) :: cgwf(2) = (/0.5d0,0.05d0/) !< multiplication factor for convective GWD @@ -3941,9 +3961,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & !< 6=areodynamical roughness over water with input 10-m wind !< 7=slightly decrease Cd for higher wind speed compare to 6 !< negative when cplwav2atm=.true. - i.e. two way wave coupling - integer :: icplocn2atm = 0 !< air_sea_flux options over ocean - !< 0=ocean current is not used in the computation of air_sea fluxes - !< 1=including ocean current in the computation of air_sea fluxes + logical :: use_oceanuv = .false. !< flag for air_sea_flux options over ocean !--- potential temperature definition in surface layer physics logical :: thsfc_loc = .true. !< flag for local vs. 
standard potential temperature @@ -4144,7 +4162,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & do_RRTMGP, active_gases, nGases, rrtmgp_root, & lw_file_gas, lw_file_clouds, rrtmgp_nBandsLW, rrtmgp_nGptsLW,& sw_file_gas, sw_file_clouds, rrtmgp_nBandsSW, rrtmgp_nGptsSW,& - doG_cldoptics, doGP_cldoptics_PADE, doGP_cldoptics_LUT, & rrtmgp_nrghice, rrtmgp_nGauss_ang, do_GPsw_Glw, & use_LW_jacobian, doGP_lwscat, damp_LW_fluxadj, lfnc_k, & lfnc_p0, iovr_convcld, doGP_sgs_cnv, doGP_sgs_mynn, & @@ -4160,7 +4177,8 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & mg_ncnst, mg_ninst, mg_ngnst, sed_supersat, do_sb_physics, & mg_alf, mg_qcmin, mg_do_ice_gmao, mg_do_liq_liu, & ltaerosol, lthailaware, lradar, nsfullradar_diag, lrefres, & - ttendlim, ext_diag_thompson, dt_inner, lgfdlmprad, & + ttendlim, ext_diag_thompson, nt_c_l, nt_c_o, av_i, xnc_max, & + ssati_min, Nt_i_max, rr_min, dt_inner, lgfdlmprad, & sedi_semi, decfl, & nssl_cccn, nssl_alphah, nssl_alphahl, & nssl_alphar, nssl_ehw0, nssl_ehlw0, & @@ -4205,10 +4223,10 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & do_myjsfc, do_myjpbl, & hwrf_samfdeep, hwrf_samfshal,progsigma,progomega,betascu, & betamcu, betadcu,h2o_phys, pdfcld, shcnvcw, redrag, & - hybedmf, satmedmf, sigmab_coldstart, & + hybedmf, satmedmf, tte_edmf, sigmab_coldstart, & shinhong, do_ysu, dspheat, lheatstrg, lseaspray, cnvcld, & xr_cnvcld, random_clds, shal_cnv, imfshalcnv, imfdeepcnv, & - isatmedmf, conv_cf_opt, do_deep, jcap, & + isatmedmf, conv_cf_opt, do_deep, jcap, cscale, & cs_parm, flgmin, cgwf, ccwf, cdmbgwd, alpha_fd, & psl_gwd_dx_factor, & sup, ctei_rm, crtrh, & @@ -4234,7 +4252,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & frac_grid, min_lakeice, min_seaice, min_lake_height, & ignore_lake, frac_ice, & !--- surface layer - sfc_z0_type, icplocn2atm, & + sfc_z0_type, use_oceanuv, & !--- switch beteeen local and standard potential temperature 
thsfc_loc, & !--- switches in 2-m diagnostics @@ -4306,6 +4324,58 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & !--- NRL ozone physics character(len=128) :: err_message + !--- If initializing model with FV3 dynamical core. + if (Model%dycore_active == Model%dycore_fv3) then + if (.not. present(tile_num)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(isc)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(jsc)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(nx)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(ny)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(cnx)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(cny)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(gnx)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(gny)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(hydrostatic)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(ak)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + if (.not. present(bk)) then + write(6,*) 'ERROR: is required when using FV3 dynamical core' + stop + endif + endif + ! 
dtend selection: default is to match all variables: dtend_select(1)='*' do ipat=2,pat_count @@ -4454,23 +4524,28 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & Model%sfcpress_id = sfcpress_id Model%gen_coord_hybrid = gen_coord_hybrid - !--- set some grid extent parameters - Model%tile_num = tile_num - Model%isc = isc - Model%jsc = jsc - Model%nx = nx - Model%ny = ny + !--- set some grid extent parameters (dycore specific) + if (Model%dycore_active == Model%dycore_fv3) then + Model%tile_num = tile_num + Model%isc = isc + Model%jsc = jsc + Model%nx = nx + Model%ny = ny + allocate (Model%ak(1:size(ak))) + allocate (Model%bk(1:size(bk))) + Model%ak = ak + Model%bk = bk + Model%cnx = cnx + Model%cny = cny + Model%lonr = gnx ! number longitudinal points + Model%latr = gny ! number of latitudinal points from pole to pole + endif + if (Model%dycore_active == Model%dycore_mpas) then + + end if Model%levs = levs - allocate (Model%ak(1:size(ak))) - allocate (Model%bk(1:size(bk))) - Model%ak = ak - Model%bk = bk Model%levsp1 = Model%levs + 1 Model%levsm1 = Model%levs - 1 - Model%cnx = cnx - Model%cny = cny - Model%lonr = gnx ! number longitudinal points - Model%latr = gny ! number of latitudinal points from pole to pole Model%nblks = size(blksz) allocate (Model%blksz(1:Model%nblks)) Model%blksz = blksz @@ -4701,9 +4776,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & Model%sw_file_clouds = sw_file_clouds Model%rrtmgp_nBandsSW = rrtmgp_nBandsSW Model%rrtmgp_nGptsSW = rrtmgp_nGptsSW - Model%doG_cldoptics = doG_cldoptics - Model%doGP_cldoptics_PADE = doGP_cldoptics_PADE - Model%doGP_cldoptics_LUT = doGP_cldoptics_LUT Model%iovr_convcld = iovr_convcld Model%use_LW_jacobian = use_LW_jacobian Model%damp_LW_fluxadj = damp_LW_fluxadj @@ -4720,11 +4792,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & write(0,*) "Logic error, RRTMGP only works with levr = levs" stop end if - ! 
RRTMGP LW scattering calculation not supported w/ RRTMG cloud-optics - if (Model%doGP_lwscat .and. Model%doG_cldoptics) then - write(0,*) "Logic error, RRTMGP Longwave cloud-scattering not supported with RRTMG cloud-optics." - stop - end if if (Model%doGP_sgs_mynn .and. .not. do_mynnedmf) then write(0,*) "Logic error, RRTMGP flag doGP_sgs_mynn only works with do_mynnedmf=.true." stop @@ -4736,14 +4803,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & write(0,*) "RRTMGP implicit cloud scheme being used." endif - if (Model%doGP_cldoptics_PADE .and. Model%doGP_cldoptics_LUT) then - write(0,*) "Logic error, Both RRTMGP cloud-optics options cannot be selected. " - stop - end if - if (.not. Model%doGP_cldoptics_PADE .and. .not. Model%doGP_cldoptics_LUT .and. .not. Model%doG_cldoptics) then - write(0,*) "Logic error, No option for cloud-optics scheme provided. Using RRTMG cloud-optics" - Model%doG_cldoptics = .true. - end if if (Model%rrtmgp_nGptsSW .lt. 0 .or. Model%rrtmgp_nGptsLW .lt. 0 .or. & Model%rrtmgp_nBandsSW .lt. 0 .or. Model%rrtmgp_nBandsLW .lt. 0) then write(0,*) "Logic error, RRTMGP spectral dimensions (bands/gpts) need to be provided." @@ -4840,6 +4899,13 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & endif Model%sedi_semi = sedi_semi Model%decfl = decfl + Model%nt_c_l = nt_c_l + Model%nt_c_o = nt_c_o + Model%av_i = av_i + Model%xnc_max = xnc_max + Model%ssati_min = ssati_min + Model%Nt_i_max = Nt_i_max + Model%rr_min = rr_min !--- TEMPO MP parameters ! DJS to Anders: Maybe we put more of these nml options into the TEMPO configuration type? 
@@ -5071,6 +5137,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & Model%redrag = redrag Model%hybedmf = hybedmf Model%satmedmf = satmedmf + Model%tte_edmf = tte_edmf Model%shinhong = shinhong Model%do_ysu = do_ysu Model%dspheat = dspheat @@ -5088,6 +5155,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & Model%conv_cf_opt = conv_cf_opt Model%nmtvr = nmtvr Model%jcap = jcap + Model%cscale = cscale Model%flgmin = flgmin Model%cgwf = cgwf Model%ccwf = ccwf @@ -5204,7 +5272,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & !--- surface layer Model%sfc_z0_type = sfc_z0_type if (Model%cplwav2atm) Model%sfc_z0_type = -1 - Model%icplocn2atm = icplocn2atm + Model%use_oceanuv = use_oceanuv !--- potential temperature reference in sfc layer Model%thsfc_loc = thsfc_loc @@ -5826,28 +5894,38 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & Model%first_time_step = .true. Model%restart = restart Model%lsm_cold_start = .not. restart - Model%hydrostatic = hydrostatic if (Model%me == Model%master) then print *,'in atm phys init, phour=',Model%phour,'fhour=',Model%fhour,'zhour=',Model%zhour,'kdt=',Model%kdt endif - - if(Model%hydrostatic .and. Model%lightning_threat) then - write(0,*) 'Turning off lightning threat index for hydrostatic run.' - Model%lightning_threat = .false. - lightning_threat = .false. + if (Model%dycore_active == Model%dycore_fv3) then + Model%hydrostatic = hydrostatic + if(Model%hydrostatic .and. Model%lightning_threat) then + write(0,*) 'Turning off lightning threat index for hydrostatic run.' + Model%lightning_threat = .false. + lightning_threat = .false. 
+ endif endif - Model%jdat(1:8) = jdat(1:8) - allocate (Model%si(Model%levs+1)) - !--- Define sigma level for radiation initialization + !--- Define sigma level for radiation initialization (FV3) !--- The formula converting hybrid sigma pressure coefficients to sigma coefficients follows Eckermann (2009, MWR) !--- ps is replaced with p0. The value of p0 uses that in http://www.emc.ncep.noaa.gov/officenotes/newernotes/on461.pdf !--- ak/bk have been flipped from their original FV3 orientation and are defined sfc -> toa - Model%si(1:Model%levs+1) = (ak(1:Model%levs+1) + bk(1:Model%levs+1) * con_p0 - ak(Model%levs+1)) / (con_p0 - ak(Model%levs+1)) + allocate (Model%si(Model%levs+1)) + if (Model%dycore_active == Model%dycore_fv3) then + Model%si(1:Model%levs+1) = (ak(1:Model%levs+1) + bk(1:Model%levs+1) * con_p0 - ak(Model%levs+1)) / (con_p0 - ak(Model%levs+1)) + end if + ! DJS2025: NOT YET IMPLEMENTED + if (Model%dycore_active == Model%dycore_mpas) then + Model%si(1:Model%levs+1) = 1._kind_phys + endif + + ! --- Set default time + Model%jdat(1:8) = jdat(1:8) Model%sec = 0 Model%yearlen = 365 Model%julian = -9999. + !--- Set vertical flag used by radiation schemes Model%top_at_1 = .false. if (Model%do_RRTMGP) then @@ -6278,6 +6356,13 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & ' dt_inner =',Model%dt_inner, & ' sedi_semi=',Model%sedi_semi, & ' decfl=',decfl, & + ' nt_c_l=',nt_c_l, & + ' nt_c_o=',nt_c_o, & + ' av_i=',av_i, & + ' xnc_max=',xnc_max, & + ' ssati_min',ssati_min, & + ' Nt_i_max',Nt_i_max, & + ' rr_min',rr_min, & ' effr_in =',Model%effr_in, & ' lradar =',Model%lradar, & ' nsfullradar_diag =',Model%nsfullradar_diag, & @@ -6445,6 +6530,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & ! Model%upd_slc = land_iau_upd_slc ! Model%do_stcsmc_adjustment = land_iau_do_stcsmc_adjustment ! 
Model%min_T_increment = land_iau_min_T_increment + call Model%print () end subroutine control_initialize @@ -6649,18 +6735,23 @@ subroutine control_print(Model) print *, ' thermodyn_id : ', Model%thermodyn_id print *, ' sfcpress_id : ', Model%sfcpress_id print *, ' gen_coord_hybrid : ', Model%gen_coord_hybrid - print *, ' hydrostatic : ', Model%hydrostatic + if (Model%dycore_active == Model%dycore_fv3) then + print *, ' hydrostatic : ', Model%hydrostatic + endif print *, ' ' print *, 'grid extent parameters' - print *, ' isc : ', Model%isc - print *, ' jsc : ', Model%jsc - print *, ' nx : ', Model%nx - print *, ' ny : ', Model%ny - print *, ' levs : ', Model%levs - print *, ' cnx : ', Model%cnx - print *, ' cny : ', Model%cny - print *, ' lonr : ', Model%lonr - print *, ' latr : ', Model%latr + if (Model%dycore_active == Model%dycore_fv3) then + print *, ' isc : ', Model%isc + print *, ' jsc : ', Model%jsc + print *, ' nx : ', Model%nx + print *, ' ny : ', Model%ny + print *, ' levs : ', Model%levs + print *, ' cnx : ', Model%cnx + print *, ' cny : ', Model%cny + print *, ' lonr : ', Model%lonr + print *, ' latr : ', Model%latr + end if + print *, ' nblks : ', Model%nblks print *, ' blksz(1) : ', Model%blksz(1) print *, ' blksz(nblks) : ', Model%blksz(Model%nblks) print *, ' Model%ncols : ', Model%ncols @@ -6792,9 +6883,6 @@ subroutine control_print(Model) print *, ' sw_file_clouds : ', Model%sw_file_clouds print *, ' rrtmgp_nBandsSW : ', Model%rrtmgp_nBandsSW print *, ' rrtmgp_nGptsSW : ', Model%rrtmgp_nGptsSW - print *, ' doG_cldoptics : ', Model%doG_cldoptics - print *, ' doGP_cldoptics_PADE: ', Model%doGP_cldoptics_PADE - print *, ' doGP_cldoptics_LUT : ', Model%doGP_cldoptics_LUT print *, ' use_LW_jacobian : ', Model%use_LW_jacobian print *, ' damp_LW_fluxadj : ', Model%damp_LW_fluxadj print *, ' lfnc_k : ', Model%lfnc_k @@ -6834,6 +6922,13 @@ subroutine control_print(Model) print *, ' dt_inner : ', Model%dt_inner print *, ' sedi_semi : ', Model%sedi_semi 
print *, ' decfl : ', Model%decfl + print *, ' nt_c_l : ', Model%nt_c_l + print *, ' nt_c_o : ', Model%nt_c_o + print *, ' av_i : ', Model%av_i + print *, ' xnc_max : ', Model%xnc_max + print *, ' ssati_min : ', Model%ssati_min + print *, ' Nt_i_max : ', Model%Nt_i_max + print *, ' rr_min : ', Model%rr_min print *, ' ' endif if (Model%imp_physics == Model%imp_physics_nssl) then @@ -6976,6 +7071,7 @@ subroutine control_print(Model) print *, ' redrag : ', Model%redrag print *, ' hybedmf : ', Model%hybedmf print *, ' satmedmf : ', Model%satmedmf + print *, ' tte_edmf : ', Model%tte_edmf print *, ' isatmedmf : ', Model%isatmedmf print *, ' shinhong : ', Model%shinhong print *, ' do_ysu : ', Model%do_ysu @@ -6992,6 +7088,7 @@ subroutine control_print(Model) print *, ' conv_cf_opt : ', Model%conv_cf_opt print *, ' nmtvr : ', Model%nmtvr print *, ' jcap : ', Model%jcap + print *, ' cscale : ', Model%cscale print *, ' cs_parm : ', Model%cs_parm print *, ' flgmin : ', Model%flgmin print *, ' cgwf : ', Model%cgwf @@ -7063,7 +7160,7 @@ subroutine control_print(Model) print *, ' ' print *, 'surface layer options' print *, ' sfc_z0_type : ', Model%sfc_z0_type - print *, ' icplocn2atm : ', Model%icplocn2atm + print *, ' use_oceanuv : ', Model%use_oceanuv print *, ' ' print *, 'vertical diffusion coefficients' print *, ' xkzm_m : ', Model%xkzm_m @@ -7541,11 +7638,13 @@ subroutine cldprop_create (Cldprop, Model) allocate (Cldprop%cv (IM)) allocate (Cldprop%cvt (IM)) allocate (Cldprop%cvb (IM)) + allocate (Cldprop%cnvw(IM, Model%levs)) Cldprop%cv = clear_val Cldprop%cvt = clear_val Cldprop%cvb = clear_val - + Cldprop%cnvw = clear_val + end subroutine cldprop_create diff --git a/ccpp/data/GFS_typedefs.meta b/ccpp/data/GFS_typedefs.meta index 096d7c4cc0..0b9ae7fe3d 100644 --- a/ccpp/data/GFS_typedefs.meta +++ b/ccpp/data/GFS_typedefs.meta @@ -1643,7 +1643,7 @@ standard_name = temperature_in_surface_snow long_name = temperature_in_surface_snow units = K - dimensions = 
(horizontal_dimension, lower_bound_of_vertical_dimension_of_surface_snow:0) + dimensions = (horizontal_dimension, lower_bound_of_vertical_dimension_of_surface_snow:constant_zero) type = real kind = kind_phys active = (control_for_land_surface_scheme == identifier_for_noahmp_land_surface_scheme) @@ -1659,7 +1659,7 @@ standard_name = lwe_thickness_of_ice_in_surface_snow long_name = snow layer ice units = mm - dimensions = (horizontal_dimension, lower_bound_of_vertical_dimension_of_surface_snow:0) + dimensions = (horizontal_dimension, lower_bound_of_vertical_dimension_of_surface_snow:constant_zero) type = real kind = kind_phys active = (control_for_land_surface_scheme == identifier_for_noahmp_land_surface_scheme) @@ -1667,7 +1667,7 @@ standard_name = lwe_thickness_of_liquid_water_in_surface_snow long_name = snow layer liquid water units = mm - dimensions = (horizontal_dimension, lower_bound_of_vertical_dimension_of_surface_snow:0) + dimensions = (horizontal_dimension, lower_bound_of_vertical_dimension_of_surface_snow:constant_zero) type = real kind = kind_phys active = (control_for_land_surface_scheme == identifier_for_noahmp_land_surface_scheme) @@ -2541,7 +2541,7 @@ standard_name = surface_downwelling_shortwave_flux_on_radiation_timestep_assuming_clear_sky long_name = total sky sfc downward sw flux assuming clear sky conditions units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [sfcnsw] @@ -2975,7 +2975,7 @@ active = ((flag_for_surface_flux_coupling .or. do_fire_coupling) .and. 
do_mediator_atmosphere_ocean_fluxes) [sncovr1_lnd] standard_name = surface_snow_area_fraction_over_land_from_land - long_name = surface snow area fraction over land for coupling + long_name = surface snow area fraction over land for coupling units = frac dimensions = (horizontal_dimension) type = real @@ -3685,6 +3685,24 @@ units = index dimensions = () type = integer +[dycore_active] + standard_name = control_for_dynamical_core + long_name = choice of dynamical core + units = flag + dimensions = () + type = integer +[dycore_fv3] + standard_name = identifier_for_fv3_dynamical_core + long_name = identifier for FV3 dynamical core + units = flag + dimensions = () + type = integer +[dycore_mpas] + standard_name = identifier_for_mpas_dynamical_core + long_name = identifier for MPAS dynamical core + units = flag + dimensions = () + type = integer [tile_num] standard_name = index_of_cubed_sphere_tile long_name = tile number @@ -4095,13 +4113,13 @@ type = logical [ipsd0] standard_name = initial_seed_for_mcica - long_name = initial permutaion seed for mcica radiation + long_name = initial permutation seed for mcica radiation units = 1 dimensions = () type = integer [ipsdlim] standard_name = limit_for_initial_seed_for_mcica - long_name = limit for initial permutaion seed for mcica radiation + long_name = limit for initial permutation seed for mcica radiation units = 1 dimensions = () type = integer @@ -4215,24 +4233,6 @@ units = count dimensions = () type = integer -[doG_cldoptics] - standard_name = flag_to_calc_lw_cld_optics_using_RRTMG - long_name = logical flag to control cloud optics scheme. - units = flag - dimensions = () - type = logical -[doGP_cldoptics_PADE] - standard_name = flag_to_calc_lw_cld_optics_using_RRTMGP_PADE - long_name = logical flag to control cloud optics scheme. 
- units = flag - dimensions = () - type = logical -[doGP_cldoptics_LUT] - standard_name = flag_to_calc_lw_cld_optics_using_RRTMGP_LUT - long_name = logical flag to control cloud optics scheme. - units = flag - dimensions = () - type = logical [use_LW_jacobian] standard_name = flag_to_calc_RRTMGP_LW_jacobian long_name = logical flag to control RRTMGP LW calculation @@ -4999,6 +4999,55 @@ units = flag dimensions = () type = logical +[nt_c_l] + standard_name = prescribed_number_concentration_of_cloud_liquid_water_particles_in_air_over_land + long_name = volumetric number concentration of cloud liquid water droplets in air over land points + units = m-3 + dimensions = () + type = real + kind = kind_phys +[nt_c_o] + standard_name = prescribed_number_concentration_of_cloud_liquid_water_particles_in_air_over_ocean + long_name = volumetric number concentration of cloud liquid water droplets in air over ocean points + units = m-3 + dimensions = () + type = real + kind = kind_phys +[av_i] + standard_name = cloud_ice_to_snow_tuning_parameter + long_name = transition value of coefficient matching at crossover from cloud ice to snow + units = 1 + dimensions = () + type = real + kind = kind_phys +[xnc_max] + standard_name = maximum_mass_number_concentration_of_cloud_liquid_water_particles_in_air_used_in_deposition_nucleation + long_name = maximum mass number concentration of cloud liquid water particles in air + units = kg-1 + dimensions = () + type = real + kind = kind_phys +[ssati_min] + standard_name = minimum_threshold_supersaturation_over_ice_for_deposition_nucleation + long_name = minimum supersaturation over ice threshold for deposition nucleation + units = fraction + dimensions = () + type = real + kind = kind_phys +[Nt_i_max] + standard_name = maximum_threshold_number_concentration_of_cloud_ice_water_crystals_in_air + long_name = maximum threshold number concentration of cloud ice water crystals in air + units = m-3 + dimensions = () + type = real + kind = kind_phys 
+[rr_min] + standard_name = multiplicative_tuning_parameter_for_microphysical_sedimentation_minimum_threshold + long_name = multiplicative tuning parameter for microphysical sedimentation minimum threshold + units = 1 + dimensions = () + type = real + kind = kind_phys [gfs_phys_time_vary_is_init] standard_name = flag_for_gfs_phys_time_vary_interstitial_initialization long_name = flag carrying interstitial initialization status @@ -5465,6 +5514,12 @@ units = flag dimensions = () type = logical +[tte_edmf] + standard_name = flag_for_scale_aware_TTE_moist_EDMF_PBL + long_name = flag for scale-aware TTE moist EDMF PBL scheme + units = flag + dimensions = () + type = logical [shinhong] standard_name = flag_for_scale_aware_Shinhong_PBL long_name = flag for scale-aware Shinhong PBL scheme @@ -5810,6 +5865,13 @@ units = count dimensions = () type = integer +[cscale] + standard_name = multiplicative_tuning_parameter_for_convective_cloud_water + long_name = multiplicative tuning parameter for convective cloud water + units = none + dimensions = () + type = real + kind = kind_phys [cs_parm(1)] standard_name = updraft_velocity_tunable_parameter_1_CS long_name = tunable parameter 1 for Chikira-Sugiyama convection @@ -6111,12 +6173,12 @@ units = flag dimensions = () type = integer -[icplocn2atm] - standard_name = control_for_air_sea_flux_computation_over_water +[use_oceanuv] + standard_name = do_air_sea_flux_computation_over_water long_name = air-sea flux option - units = 1 + units = flag dimensions = () - type = integer + type = logical [xkzminv] standard_name = max_atmosphere_heat_diffusivity_due_to_background long_name = maximum background value of heat diffusivity @@ -8211,6 +8273,13 @@ type = real kind = kind_phys active = (number_of_cloud_types_CS > 0 .and. 
flag_for_Chikira_Sugiyama_deep_convection) +[phy_f2d] + standard_name = xy_dimensioned_restart_array + long_name = xy dimensioned restart array + units = mixed + dimensions = (horizontal_dimension,number_of_variables_in_xy_dimensioned_restart_array) + type = real + kind = kind_phys [phy_f2d(:,index_of_surface_air_pressure_two_timesteps_back_in_xyz_dimensioned_tracer_array)] standard_name = surface_air_pressure_two_timesteps_back long_name = surface air pressure two timesteps back @@ -8235,6 +8304,13 @@ type = real kind = kind_phys active = (index_of_enhancement_to_wind_speed_at_surface_adjacent_layer_due_to_convection_in_xy_dimensioned_restart_array > 0) +[phy_f3d] + standard_name = xyz_dimensioned_restart_array + long_name = xyz dimensioned restart array + units = mixed + dimensions = (horizontal_dimension,vertical_layer_dimension,number_of_variables_in_xyz_dimensioned_restart_array) + type = real + kind = kind_phys [phy_f3d(:,:,index_of_air_temperature_two_timesteps_back_in_xyz_dimensioned_restart_array)] standard_name = air_temperature_two_timesteps_back long_name = air temperature two timesteps back @@ -8624,6 +8700,13 @@ dimensions = (horizontal_dimension) type = real kind = kind_phys +[cnvw] + standard_name = convective_cloud_water_mixing_ratio + long_name = moist convective cloud water mixing ratio + units = kg kg-1 + dimensions = (horizontal_dimension,vertical_layer_dimension) + type = real + kind = kind_phys ######################################################################## [ccpp-table-properties] @@ -9366,7 +9449,7 @@ standard_name = surface_downwelling_shortwave_flux_assuming_clear_sky long_name = surface downwelling shortwave flux at current time assuming clear sky units = W m-2 - dimensions = (horizontal_loop_extent) + dimensions = (horizontal_dimension) type = real kind = kind_phys [nswsfci] @@ -10735,3 +10818,9 @@ dimensions = () type = real kind = kind_phys +[con_zero] + standard_name = constant_zero + long_name = definition of constant zero + 
units = 1 + dimensions = () + type = integer diff --git a/ccpp/data/MPAS_typedefs.F90 b/ccpp/data/MPAS_typedefs.F90 new file mode 100644 index 0000000000..31bbec27bf --- /dev/null +++ b/ccpp/data/MPAS_typedefs.F90 @@ -0,0 +1,12 @@ +! ########################################################################################### +!> \file MPAS_typedefs.F90 +! ########################################################################################### +module MPAS_typedefs + use mpi_f08 + use machine, only: kind_phys, kind_dbl_prec, kind_sngl_prec + implicit none + +!> \section arg_table_MPAS_typedefs +!! \htmlinclude MPAS_typedefs.html +!! +end module MPAS_typedefs diff --git a/ccpp/data/MPAS_typedefs.meta b/ccpp/data/MPAS_typedefs.meta new file mode 100644 index 0000000000..21c41ebb17 --- /dev/null +++ b/ccpp/data/MPAS_typedefs.meta @@ -0,0 +1,10 @@ +######################################################################## +[ccpp-table-properties] + name = MPAS_typedefs + type = module + relative_path = ../physics/physics/ + dependencies = hooks/machine.F + +[ccpp-arg-table] + name = MPAS_typedefs + type = module diff --git a/ccpp/driver/GFS_init.F90 b/ccpp/driver/GFS_init.F90 index af1b768bcc..1c7e60e797 100644 --- a/ccpp/driver/GFS_init.F90 +++ b/ccpp/driver/GFS_init.F90 @@ -7,7 +7,6 @@ module GFS_init GFS_control_type, GFS_grid_type, & GFS_tbd_type, GFS_cldprop_type, & GFS_radtend_type, GFS_diag_type - use CCPP_typedefs, only: GFS_interstitial_type implicit none @@ -28,7 +27,7 @@ module GFS_init !-------------- subroutine GFS_initialize (Model, Statein, Stateout, Sfcprop, & Coupling, Grid, Tbd, Cldprop, Radtend, & - Diag, Interstitial, Init_parm) + Diag, Init_parm) #ifdef _OPENMP use omp_lib @@ -45,7 +44,6 @@ subroutine GFS_initialize (Model, Statein, Stateout, Sfcprop, & type(GFS_cldprop_type), intent(inout) :: Cldprop type(GFS_radtend_type), intent(inout) :: Radtend type(GFS_diag_type), intent(inout) :: Diag - type(GFS_interstitial_type), intent(inout) :: 
Interstitial(:) type(GFS_init_type), intent(in) :: Init_parm !--- local variables @@ -64,22 +62,26 @@ subroutine GFS_initialize (Model, Statein, Stateout, Sfcprop, & #endif !--- set control properties (including namelist read) + Model%dycore_active = Model%dycore_fv3 call Model%init (Init_parm%nlunit, Init_parm%fn_nml, & Init_parm%me, Init_parm%master, & - Init_parm%logunit, Init_parm%isc, & - Init_parm%jsc, Init_parm%nx, Init_parm%ny, & - Init_parm%levs, Init_parm%cnx, Init_parm%cny, & - Init_parm%gnx, Init_parm%gny, & + Init_parm%logunit, Init_parm%levs, & Init_parm%dt_dycore, Init_parm%dt_phys, & Init_parm%iau_offset, Init_parm%bdat, & Init_parm%cdat, Init_parm%nwat, & Init_parm%tracer_names, & Init_parm%tracer_types, & - Init_parm%input_nml_file, Init_parm%tile_num, & - Init_parm%blksz, Init_parm%ak, Init_parm%bk, & - Init_parm%restart, Init_parm%hydrostatic, & - Init_parm%fcst_mpi_comm, & - Init_parm%fcst_ntasks, nthrds) + Init_parm%input_nml_file, Init_parm%blksz, & + Init_parm%restart, Init_parm%fcst_mpi_comm, & + Init_parm%fcst_ntasks, nthrds, & + ! Below only needed for FV3 dynamical core. + tile_num = Init_parm%tile_num, & + isc = Init_parm%isc, jsc = Init_parm%jsc, & + nx = Init_parm%nx, ny = Init_parm%ny, & + cnx = Init_parm%cnx, cny = Init_parm%cny, & + gnx = Init_parm%gnx, gny = Init_parm%gny, & + ak = Init_parm%ak, bk = Init_parm%bk, & + hydrostatic = Init_parm%hydrostatic) call Statein%create(Model) call Stateout%create(Model) @@ -91,35 +93,6 @@ subroutine GFS_initialize (Model, Statein, Stateout, Sfcprop, & call Coupling%create(Model) call Diag%create(Model) -! This logic deals with non-uniform block sizes for CCPP. When non-uniform block sizes -! are used, it is required that only the last block has a different (smaller) size than -! all other blocks. This is the standard in FV3. If this is the case, set non_uniform_blocks -! to .true. and initialize nthreads+1 elements of the interstitial array. The extra element -! 
will be used by the thread that runs over the last, smaller block. - if (minval(Init_parm%blksz)==maxval(Init_parm%blksz)) then - non_uniform_blocks = .false. - elseif (all(minloc(Init_parm%blksz)==(/size(Init_parm%blksz)/))) then - non_uniform_blocks = .true. - else - write(0,'(2a)') 'For non-uniform blocksizes, only the last element ', & - 'in Init_parm%blksz can be different from the others' - stop - endif - -! Initialize the Interstitial data type in parallel so that -! each thread creates (touches) its Interstitial(nt) first. -!$OMP parallel do default (shared) & -!$OMP schedule (static,1) & -!$OMP private (nt) - do nt=1,nthrds - call Interstitial (nt)%create (maxval(Init_parm%blksz), Model) - enddo -!$OMP end parallel do - - if (non_uniform_blocks) then - call Interstitial (nthrds+1)%create (Init_parm%blksz(nblks), Model) - end if - !--- populate the grid components call GFS_grid_populate (Grid, Init_parm%xlon, Init_parm%xlat, Init_parm%area) diff --git a/ccpp/driver/MPAS_init.F90 b/ccpp/driver/MPAS_init.F90 new file mode 100644 index 0000000000..b54e5f027e --- /dev/null +++ b/ccpp/driver/MPAS_init.F90 @@ -0,0 +1,77 @@ +! ########################################################################################### +!> \file MPAS_init.F90 +!> +! ########################################################################################### +module MPAS_init + use machine, only : kind_phys + use ufs_mpas_subdriver, only : MPAS_control_type + use GFS_typedefs, only : GFS_control_type, GFS_diag_type, GFS_grid_type, GFS_tbd_type + use GFS_typedefs, only : GFS_sfcprop_type, GFS_statein_type, GFS_cldprop_type + use GFS_typedefs, only : GFS_radtend_type + use GFS_typedefs, only : GFS_coupling_type + + implicit none + + public :: MPAS_initialize + +contains + !> ######################################################################################### + !> Procedure to initialize MPAS interface to CCPP Physics. 
+ !> + !> ######################################################################################### + subroutine MPAS_initialize (Model, Diag, Grid, Tbd, SfcProp, Statein, CldProp, RadTend, & + Coupling, Init_parm) +#ifdef _OPENMP + use omp_lib +#endif + + ! Inputs + type(GFS_control_type), intent(inout) :: Model + type(GFS_diag_type), intent(inout) :: Diag + type(GFS_grid_type), intent(inout) :: Grid + type(GFS_tbd_type), intent(inout) :: Tbd + type(GFS_sfcprop_type), intent(inout) :: SfcProp + type(GFS_statein_type), intent(inout) :: Statein + type(GFS_cldprop_type), intent(inout) :: Cldprop + type(GFS_radtend_type), intent(inout) :: Radtend + type(GFS_coupling_type), intent(inout) :: Coupling + type(MPAS_control_type), intent(inout) :: Init_parm + + ! Locals + integer :: nb + integer :: nblks + integer :: nt + integer :: nthrds + logical :: non_uniform_blocks + integer :: ix + + nblks = size(Init_parm%blksz) + +#ifdef _OPENMP + nthrds = omp_get_max_threads() +#else + nthrds = 1 +#endif + + ! Set control properties (including physics namelist read) + Model%dycore_active = Model%dycore_mpas + call Model%init(Init_parm%nlunit, Init_parm%fn_nml, Init_parm%me, Init_parm%master, & + Init_parm%logunit, Init_parm%levs, real(Init_parm%dt_dycore, kind_phys), & + real(Init_parm%dt_phys, kind_phys), Init_parm%iau_offset, Init_parm%bdat, & + Init_parm%cdat, Init_parm%nwat, Init_parm%tracer_names, Init_parm%tracer_types, & + Init_parm%input_nml_file, Init_parm%blksz, Init_parm%restart, Init_parm%mpi_comm, & + Init_parm%fcst_ntasks, nthrds) + + ! Allocate data containers for physics. 
+ call Grid%create(Model) + call Diag%create(Model) + call Tbd%create(Model) + call SfcProp%create(Model) + call Statein%create(Model) + call Cldprop%create(Model) + call Radtend%create(Model) + call Coupling%create(Model) + + end subroutine MPAS_initialize + +end module MPAS_init diff --git a/ccpp/framework b/ccpp/framework index 11359cb04a..3256121dc4 160000 --- a/ccpp/framework +++ b/ccpp/framework @@ -1 +1 @@ -Subproject commit 11359cb04a420fc87e4cf0f035f4d1215ab24488 +Subproject commit 3256121dc4972d5c78f43f1a16ea1cb118ec6daf diff --git a/ccpp/physics b/ccpp/physics index 8292b660ec..6382d6140c 160000 --- a/ccpp/physics +++ b/ccpp/physics @@ -1 +1 @@ -Subproject commit 8292b660ec9d692577c5490738335341dd4420eb +Subproject commit 6382d6140c1c11a3e830716a7f5a9d7eb9d58cab diff --git a/ccpp/suites/suite_FV3_GFS_v15_thompson_mynn_lam3km.xml b/ccpp/suites/suite_FV3_GFS_v15_thompson_mynn_lam3km.xml index 8028ea7a19..af490fe837 100644 --- a/ccpp/suites/suite_FV3_GFS_v15_thompson_mynn_lam3km.xml +++ b/ccpp/suites/suite_FV3_GFS_v15_thompson_mynn_lam3km.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v15p2.xml b/ccpp/suites/suite_FV3_GFS_v15p2.xml index f9db755e99..0682aa0f90 100644 --- a/ccpp/suites/suite_FV3_GFS_v15p2.xml +++ b/ccpp/suites/suite_FV3_GFS_v15p2.xml @@ -24,7 +24,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v16.xml b/ccpp/suites/suite_FV3_GFS_v16.xml index ce5736e5c0..5fba28575a 100644 --- a/ccpp/suites/suite_FV3_GFS_v16.xml +++ b/ccpp/suites/suite_FV3_GFS_v16.xml @@ -24,7 +24,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v16_csawmg.xml b/ccpp/suites/suite_FV3_GFS_v16_csawmg.xml index f70d9174d0..0f18a0cbd7 100644 --- a/ccpp/suites/suite_FV3_GFS_v16_csawmg.xml +++ b/ccpp/suites/suite_FV3_GFS_v16_csawmg.xml @@ -19,7 +19,7 @@ 
rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v16_flake.xml b/ccpp/suites/suite_FV3_GFS_v16_flake.xml index 15b547ffb7..2548d7c212 100644 --- a/ccpp/suites/suite_FV3_GFS_v16_flake.xml +++ b/ccpp/suites/suite_FV3_GFS_v16_flake.xml @@ -24,7 +24,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v16_fv3wam.xml b/ccpp/suites/suite_FV3_GFS_v16_fv3wam.xml index 04191ca7ee..918ef4856c 100644 --- a/ccpp/suites/suite_FV3_GFS_v16_fv3wam.xml +++ b/ccpp/suites/suite_FV3_GFS_v16_fv3wam.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v16_gfdlmpv3.xml b/ccpp/suites/suite_FV3_GFS_v16_gfdlmpv3.xml index 604343d635..c02d886b15 100644 --- a/ccpp/suites/suite_FV3_GFS_v16_gfdlmpv3.xml +++ b/ccpp/suites/suite_FV3_GFS_v16_gfdlmpv3.xml @@ -24,7 +24,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v16_ras.xml b/ccpp/suites/suite_FV3_GFS_v16_ras.xml index 506964317d..918dc7246f 100644 --- a/ccpp/suites/suite_FV3_GFS_v16_ras.xml +++ b/ccpp/suites/suite_FV3_GFS_v16_ras.xml @@ -24,7 +24,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_coupled_p8.xml b/ccpp/suites/suite_FV3_GFS_v17_coupled_p8.xml index 3b79d258b9..ee5465b663 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_coupled_p8.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_coupled_p8.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_c3.xml b/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_c3.xml index 439755717f..cc30e09872 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_c3.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_c3.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - 
GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_sfcocn.xml b/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_sfcocn.xml index 28a9085ec9..4d586a4f37 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_sfcocn.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_sfcocn.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_ugwpv1.xml b/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_ugwpv1.xml index b75921dbaa..57497e1347 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_ugwpv1.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_coupled_p8_ugwpv1.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_p8.xml b/ccpp/suites/suite_FV3_GFS_v17_p8.xml index f08119e9a6..05b9c520bb 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_p8.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_p8.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_p8_c3.xml b/ccpp/suites/suite_FV3_GFS_v17_p8_c3.xml index d0d15f1b81..66e7f859be 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_p8_c3.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_p8_c3.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_p8_mynn.xml b/ccpp/suites/suite_FV3_GFS_v17_p8_mynn.xml index 36bc3d0500..3c9dbe2501 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_p8_mynn.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_p8_mynn.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_p8_rrtmgp.xml b/ccpp/suites/suite_FV3_GFS_v17_p8_rrtmgp.xml index 0a5b9222ec..8dfef69ec8 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_p8_rrtmgp.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_p8_rrtmgp.xml @@ -20,7 +20,7 @@ rrtmgp_aerosol_optics 
rrtmgp_sw_main rrtmgp_lw_main - GFS_rrtmgp_post + GFS_radiation_post @@ -42,6 +42,7 @@ sfc_nst sfc_nst_post noahmpdrv + sfc_land sfc_sice GFS_surface_loop_control_part2 @@ -55,8 +56,8 @@ satmedmfvdifq GFS_PBL_generic_post GFS_GWD_generic_pre - unified_ugwp - unified_ugwp_post + ugwpv1_gsldrag + ugwpv1_gsldrag_post GFS_GWD_generic_post GFS_suite_stateout_update diff --git a/ccpp/suites/suite_FV3_GFS_v17_p8_ugwpv1.xml b/ccpp/suites/suite_FV3_GFS_v17_p8_ugwpv1.xml index 6d32a42a23..0e3a28e6f5 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_p8_ugwpv1.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_p8_ugwpv1.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_GFS_v17_p8_ugwpv1_tempo.xml b/ccpp/suites/suite_FV3_GFS_v17_p8_ugwpv1_tempo.xml index cd1d1aff56..5dfd909e09 100644 --- a/ccpp/suites/suite_FV3_GFS_v17_p8_ugwpv1_tempo.xml +++ b/ccpp/suites/suite_FV3_GFS_v17_p8_ugwpv1_tempo.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf.xml b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf.xml index 09f6ff89d8..da8a269a12 100644 --- a/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf.xml +++ b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf.xml @@ -24,7 +24,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf_nonsst.xml b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf_nonsst.xml index 15cca099c9..1acc5d5234 100644 --- a/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf_nonsst.xml +++ b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf_nonsst.xml @@ -24,7 +24,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HAFS_v1_gfdlmpv3_tedmf.xml b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmpv3_tedmf.xml index b222245f27..9556c27f9f 100644 --- a/ccpp/suites/suite_FV3_HAFS_v1_gfdlmpv3_tedmf.xml +++ 
b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmpv3_tedmf.xml @@ -24,7 +24,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HAFS_v1_thompson.xml b/ccpp/suites/suite_FV3_HAFS_v1_thompson.xml index a4b84e5f41..c98fee2d50 100644 --- a/ccpp/suites/suite_FV3_HAFS_v1_thompson.xml +++ b/ccpp/suites/suite_FV3_HAFS_v1_thompson.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HAFS_v1_thompson_nonsst.xml b/ccpp/suites/suite_FV3_HAFS_v1_thompson_nonsst.xml index 2ef7a87cc7..b9c1ca61e1 100644 --- a/ccpp/suites/suite_FV3_HAFS_v1_thompson_nonsst.xml +++ b/ccpp/suites/suite_FV3_HAFS_v1_thompson_nonsst.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HAFS_v1_thompson_tedmf_gfdlsf.xml b/ccpp/suites/suite_FV3_HAFS_v1_thompson_tedmf_gfdlsf.xml index 407af6f034..615e0c64e5 100644 --- a/ccpp/suites/suite_FV3_HAFS_v1_thompson_tedmf_gfdlsf.xml +++ b/ccpp/suites/suite_FV3_HAFS_v1_thompson_tedmf_gfdlsf.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HRRR.xml b/ccpp/suites/suite_FV3_HRRR.xml index aa4654d7cd..10ac551127 100644 --- a/ccpp/suites/suite_FV3_HRRR.xml +++ b/ccpp/suites/suite_FV3_HRRR.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HRRR_c3.xml b/ccpp/suites/suite_FV3_HRRR_c3.xml index 4eb6410d6c..f2ded8fe59 100644 --- a/ccpp/suites/suite_FV3_HRRR_c3.xml +++ b/ccpp/suites/suite_FV3_HRRR_c3.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HRRR_gf.xml b/ccpp/suites/suite_FV3_HRRR_gf.xml index c3dca9f507..58d3902957 100644 --- a/ccpp/suites/suite_FV3_HRRR_gf.xml +++ b/ccpp/suites/suite_FV3_HRRR_gf.xml @@ -21,7 +21,7 
@@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_HRRR_gf_nogwd.xml b/ccpp/suites/suite_FV3_HRRR_gf_nogwd.xml index 6d746f1fa0..0f5d26e2fa 100644 --- a/ccpp/suites/suite_FV3_HRRR_gf_nogwd.xml +++ b/ccpp/suites/suite_FV3_HRRR_gf_nogwd.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RAP.xml b/ccpp/suites/suite_FV3_RAP.xml index 1e6db98795..36ab67bcb6 100644 --- a/ccpp/suites/suite_FV3_RAP.xml +++ b/ccpp/suites/suite_FV3_RAP.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RAP_cires_ugwp.xml b/ccpp/suites/suite_FV3_RAP_cires_ugwp.xml index 5d06b63e1c..43b439343d 100644 --- a/ccpp/suites/suite_FV3_RAP_cires_ugwp.xml +++ b/ccpp/suites/suite_FV3_RAP_cires_ugwp.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RAP_clm_lake.xml b/ccpp/suites/suite_FV3_RAP_clm_lake.xml index adc19d8f6b..091415e6e4 100644 --- a/ccpp/suites/suite_FV3_RAP_clm_lake.xml +++ b/ccpp/suites/suite_FV3_RAP_clm_lake.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RAP_flake.xml b/ccpp/suites/suite_FV3_RAP_flake.xml index b4c90d9e60..f69c2e1442 100644 --- a/ccpp/suites/suite_FV3_RAP_flake.xml +++ b/ccpp/suites/suite_FV3_RAP_flake.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RAP_noah.xml b/ccpp/suites/suite_FV3_RAP_noah.xml index 6db03b211a..fd5d64e676 100644 --- a/ccpp/suites/suite_FV3_RAP_noah.xml +++ b/ccpp/suites/suite_FV3_RAP_noah.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_cires_ugwp.xml 
b/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_cires_ugwp.xml index 115c94a218..87195b387e 100644 --- a/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_cires_ugwp.xml +++ b/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_cires_ugwp.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RAP_sfcdiff.xml b/ccpp/suites/suite_FV3_RAP_sfcdiff.xml index 0ae504da11..2067e7d7ab 100644 --- a/ccpp/suites/suite_FV3_RAP_sfcdiff.xml +++ b/ccpp/suites/suite_FV3_RAP_sfcdiff.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RAP_unified_ugwp.xml b/ccpp/suites/suite_FV3_RAP_unified_ugwp.xml index 61aa5fc165..36ce5ed897 100644 --- a/ccpp/suites/suite_FV3_RAP_unified_ugwp.xml +++ b/ccpp/suites/suite_FV3_RAP_unified_ugwp.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RRFS_v1beta.xml b/ccpp/suites/suite_FV3_RRFS_v1beta.xml index 234b48b72d..003c7b8cc6 100644 --- a/ccpp/suites/suite_FV3_RRFS_v1beta.xml +++ b/ccpp/suites/suite_FV3_RRFS_v1beta.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_RRFS_v1nssl.xml b/ccpp/suites/suite_FV3_RRFS_v1nssl.xml index e5c11b97bc..160dd0ecf3 100644 --- a/ccpp/suites/suite_FV3_RRFS_v1nssl.xml +++ b/ccpp/suites/suite_FV3_RRFS_v1nssl.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_WoFS_v0.xml b/ccpp/suites/suite_FV3_WoFS_v0.xml index c1e1f7ccdc..2e32afd48a 100644 --- a/ccpp/suites/suite_FV3_WoFS_v0.xml +++ b/ccpp/suites/suite_FV3_WoFS_v0.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_coupled_lowres.xml b/ccpp/suites/suite_FV3_coupled_lowres.xml index 8b9a78671a..141fc8df07 100644 
--- a/ccpp/suites/suite_FV3_coupled_lowres.xml +++ b/ccpp/suites/suite_FV3_coupled_lowres.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_global_nest_v1.xml b/ccpp/suites/suite_FV3_global_nest_v1.xml index 956b267411..e74dbff5b1 100644 --- a/ccpp/suites/suite_FV3_global_nest_v1.xml +++ b/ccpp/suites/suite_FV3_global_nest_v1.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_ideal_pbl_mp_nssl.xml b/ccpp/suites/suite_FV3_ideal_pbl_mp_nssl.xml index 4df1affce6..f9fbab7e91 100644 --- a/ccpp/suites/suite_FV3_ideal_pbl_mp_nssl.xml +++ b/ccpp/suites/suite_FV3_ideal_pbl_mp_nssl.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_FV3_lowres.xml b/ccpp/suites/suite_FV3_lowres.xml index 5edcef6e28..cdae7a1018 100644 --- a/ccpp/suites/suite_FV3_lowres.xml +++ b/ccpp/suites/suite_FV3_lowres.xml @@ -19,7 +19,7 @@ rrtmg_sw_post rrtmg_lw rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_MPAS_RRFS.xml b/ccpp/suites/suite_MPAS_RRFS.xml new file mode 100644 index 0000000000..ab83aadc40 --- /dev/null +++ b/ccpp/suites/suite_MPAS_RRFS.xml @@ -0,0 +1,28 @@ + + + + + + GFS_time_vary_pre + GFS_rrtmg_setup + GFS_rad_time_vary + GFS_phys_time_vary + + + + + GFS_rrtmg_pre + GFS_radiation_surface + rad_sw_pre + rrtmg_sw + rrtmg_sw_post + rrtmg_lw + rrtmg_lw_post + GFS_radiation_post + + + + + + + diff --git a/ccpp/suites/suite_RRFS_sas.xml b/ccpp/suites/suite_RRFS_sas.xml index 7275e151fc..f7dd9295b9 100644 --- a/ccpp/suites/suite_RRFS_sas.xml +++ b/ccpp/suites/suite_RRFS_sas.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_RRFS_sas_nogwd.xml b/ccpp/suites/suite_RRFS_sas_nogwd.xml index d2c1346eca..0c83bb46a6 100644 --- 
a/ccpp/suites/suite_RRFS_sas_nogwd.xml +++ b/ccpp/suites/suite_RRFS_sas_nogwd.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_RRFSens_phy1.xml b/ccpp/suites/suite_RRFSens_phy1.xml index 51366f2f26..f35bea23d4 100644 --- a/ccpp/suites/suite_RRFSens_phy1.xml +++ b/ccpp/suites/suite_RRFSens_phy1.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_RRFSens_phy2.xml b/ccpp/suites/suite_RRFSens_phy2.xml index db8c2d28d6..71edcfcdec 100644 --- a/ccpp/suites/suite_RRFSens_phy2.xml +++ b/ccpp/suites/suite_RRFSens_phy2.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_RRFSens_phy3.xml b/ccpp/suites/suite_RRFSens_phy3.xml index b4767562f2..5fe718e1e0 100644 --- a/ccpp/suites/suite_RRFSens_phy3.xml +++ b/ccpp/suites/suite_RRFSens_phy3.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_RRFSens_phy4.xml b/ccpp/suites/suite_RRFSens_phy4.xml index aca458329a..47af44d17e 100644 --- a/ccpp/suites/suite_RRFSens_phy4.xml +++ b/ccpp/suites/suite_RRFSens_phy4.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/ccpp/suites/suite_RRFSens_phy5.xml b/ccpp/suites/suite_RRFSens_phy5.xml index 77c99805a0..b5ccb40606 100644 --- a/ccpp/suites/suite_RRFSens_phy5.xml +++ b/ccpp/suites/suite_RRFSens_phy5.xml @@ -21,7 +21,7 @@ rrtmg_lw sgscloud_radpost rrtmg_lw_post - GFS_rrtmg_post + GFS_radiation_post diff --git a/fv3/atmos_cubed_sphere b/fv3/atmos_cubed_sphere index 77be6d28a8..b0a1b0957d 160000 --- a/fv3/atmos_cubed_sphere +++ b/fv3/atmos_cubed_sphere @@ -1 +1 @@ -Subproject commit 77be6d28a8cbf39fa5299baed27f58260de7a45b +Subproject commit b0a1b0957d3b79d642f3c01e5cd9441936c7c832 diff --git a/fv3/atmos_model.F90 
b/fv3/atmos_model.F90 index e4ab41774a..1d7a274987 100644 --- a/fv3/atmos_model.F90 +++ b/fv3/atmos_model.F90 @@ -90,8 +90,8 @@ module atmos_model_mod GFS_coupling, GFS_intdiag, & GFS_interstitial use GFS_init, only: GFS_initialize -use CCPP_driver, only: CCPP_step, non_uniform_blocks - +use CCPP_driver, only: CCPP_step +use mod_ufsatm_util, only: get_atmos_tracer_types use stochastic_physics_wrapper_mod, only: stochastic_physics_wrapper,stochastic_physics_wrapper_end use fv3atm_history_io_mod, only: fv3atm_diag_register, fv3atm_diag_output, & @@ -294,7 +294,7 @@ subroutine update_atmos_radiation_physics (Atmos) !--- execute the atmospheric setup step call mpp_clock_begin(setupClock) - call CCPP_step (step="timestep_init", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="timestep_init", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP timestep_init step failed') if (GFS_Control%do_sppt .or. GFS_Control%do_shum .or. GFS_Control%do_skeb .or. & @@ -369,7 +369,7 @@ subroutine update_atmos_radiation_physics (Atmos) call mpp_clock_begin(radClock) ! Performance improvement. Only enter if it is time to call the radiation physics. if (GFS_control%lsswr .or. 
GFS_control%lslwr) then - call CCPP_step (step="radiation", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="radiation", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP radiation step failed') endif call mpp_clock_end(radClock) @@ -384,7 +384,7 @@ subroutine update_atmos_radiation_physics (Atmos) !--- execute the atmospheric physics step1 subcomponent (main physics driver) call mpp_clock_begin(physClock) - call CCPP_step (step="physics", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="physics", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP physics step failed') call mpp_clock_end(physClock) @@ -401,7 +401,7 @@ subroutine update_atmos_radiation_physics (Atmos) !--- execute the atmospheric physics step2 subcomponent (stochastic physics driver) call mpp_clock_begin(physClock) - call CCPP_step (step="stochastics", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="stochastics", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP stochastics step failed') call mpp_clock_end(physClock) @@ -416,7 +416,7 @@ subroutine update_atmos_radiation_physics (Atmos) !--- execute the atmospheric timestep finalize step call mpp_clock_begin(setupClock) - call CCPP_step (step="timestep_finalize", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="timestep_finalize", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP timestep_finalize step failed') call mpp_clock_end(setupClock) @@ -641,26 +641,7 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step) #else nthrds = 1 #endif - - ! This logic deals with non-uniform block sizes for CCPP. - ! When non-uniform block sizes are used, it is required - ! that only the last block has a different (smaller) - ! size than all other blocks. This is the standard in - ! FV3. 
If this is the case, set non_uniform_blocks (a - ! variable imported from CCPP_driver) to .true. and - ! allocate nthreads+1 elements of the interstitial array. - ! The extra element will be used by the thread that - ! runs over the last, smaller block. - if (minval(Atm_block%blksz)==maxval(Atm_block%blksz)) then - non_uniform_blocks = .false. - allocate(GFS_interstitial(nthrds)) - else if (all(minloc(Atm_block%blksz)==(/size(Atm_block%blksz)/))) then - non_uniform_blocks = .true. - allocate(GFS_interstitial(nthrds+1)) - else - call mpp_error(FATAL, 'For non-uniform blocksizes, only the last element ' // & - 'in Atm_block%blksz can be different from the others') - end if + allocate(GFS_interstitial(nthrds+1)) !--- update GFS_control%jdat(8) bdat(:) = 0 @@ -715,7 +696,7 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step) call GFS_initialize (GFS_control, GFS_Statein, GFS_Stateout, GFS_Sfcprop, & GFS_Coupling, GFS_Grid, GFS_Tbd, GFS_Cldprop, GFS_Radtend, & - GFS_Intdiag, GFS_interstitial, Init_parm) + GFS_Intdiag, Init_parm) !--- populate/associate the Diag container elements call GFS_externaldiag_populate (GFS_Diag, GFS_Control, GFS_Statein, GFS_Stateout, & @@ -769,10 +750,10 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step) endif ! Initialize the CCPP framework - call CCPP_step (step="init", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="init", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP init step failed') ! Initialize the CCPP physics - call CCPP_step (step="physics_init", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="physics_init", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP physics_init step failed') if (GFS_Control%do_sppt .or. GFS_Control%do_shum .or. GFS_Control%do_skeb .or. & @@ -1137,11 +1118,11 @@ subroutine atmos_model_end (Atmos) ! 
Fast physics (from dynamics) are finalized in atmosphere_end above; ! standard/slow physics (from CCPP) are finalized in CCPP_step 'physics_finalize'. - call CCPP_step (step="physics_finalize", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="physics_finalize", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP physics_finalize step failed') ! The CCPP framework for all cdata structures is finalized in CCPP_step 'finalize'. - call CCPP_step (step="finalize", nblks=Atm_block%nblks, ierr=ierr) + call CCPP_step (step="finalize", nblks=Atm_block%nblks, ierr=ierr, dycore='fv3') if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP finalize step failed') deallocate (Atmos%lon, Atmos%lat) @@ -1202,111 +1183,6 @@ subroutine get_atmos_model_ungridded_dim(nlev, nsoillev, ntracers) end subroutine get_atmos_model_ungridded_dim ! -!####################################################################### -! -! -! Identify and return usage and type id of atmospheric tracers. -! Ids are defined as: -! 0 = generic tracer -! 1 = chemistry - prognostic -! 2 = chemistry - diagnostic -! -! Tracers are identified via the additional 'tracer_usage' keyword and -! their optional 'type' qualifier. A tracer is assumed prognostic if -! 'type' is not provided. See examples from the field_table file below: -! -! Prognostic tracer: -! ------------------ -! "TRACER", "atmos_mod", "so2" -! "longname", "so2 mixing ratio" -! "units", "ppm" -! "tracer_usage", "chemistry" -! "profile_type", "fixed", "surface_value=5.e-6" / -! -! Diagnostic tracer: -! ------------------ -! "TRACER", "atmos_mod", "pm25" -! "longname", "PM2.5" -! "units", "ug/m3" -! "tracer_usage", "chemistry", "type=diagnostic" -! "profile_type", "fixed", "surface_value=5.e-6" / -! -! For atmospheric chemistry, the order of both prognostic and diagnostic -! tracers is validated against the model's internal assumptions. -! -! 
-subroutine get_atmos_tracer_types(tracer_types) - - use field_manager_mod, only: parse - use tracer_manager_mod, only: query_method - - integer, intent(out) :: tracer_types(:) - - !--- local variables - logical :: found - integer :: n, num_tracers, num_types - integer :: id_max, id_min, id_num, ip_max, ip_min, ip_num - character(len=32) :: tracer_usage - character(len=128) :: control, tracer_type - - !--- begin - - !--- validate array size - call get_number_tracers(MODEL_ATMOS, num_tracers=num_tracers) - - if (size(tracer_types) < num_tracers) & - call mpp_error(FATAL, 'insufficient size of tracer type array') - - !--- initialize tracer indices - id_min = num_tracers + 1 - id_max = -id_min - ip_min = id_min - ip_max = id_max - id_num = 0 - ip_num = 0 - - do n = 1, num_tracers - tracer_types(n) = 0 - found = query_method('tracer_usage',MODEL_ATMOS,n,tracer_usage,control) - if (found) then - if (trim(tracer_usage) == 'chemistry') then - !--- set default to prognostic - tracer_type = 'prognostic' - num_types = parse(control, 'type', tracer_type) - select case (trim(tracer_type)) - case ('diagnostic') - tracer_types(n) = 2 - id_num = id_num + 1 - id_max = n - if (id_num == 1) id_min = n - case ('prognostic') - tracer_types(n) = 1 - ip_num = ip_num + 1 - ip_max = n - if (ip_num == 1) ip_min = n - end select - end if - end if - end do - - if (ip_num > 0) then - !--- check if prognostic tracers are contiguous - if (ip_num > ip_max - ip_min + 1) & - call mpp_error(FATAL, 'prognostic chemistry tracers must be contiguous') - end if - - if (id_num > 0) then - !--- check if diagnostic tracers are contiguous - if (id_num > id_max - id_min + 1) & - call mpp_error(FATAL, 'diagnostic chemistry tracers must be contiguous') - end if - - !--- prognostic tracers must precede diagnostic ones - if (ip_max > id_min) & - call mpp_error(FATAL, 'diagnostic chemistry tracers must follow prognostic ones') - -end subroutine get_atmos_tracer_types -! 
!####################################################################### ! diff --git a/fv3/module_fcst_grid_comp.F90 b/fv3/module_fcst_grid_comp.F90 index 32f3c3923a..411c65bc22 100644 --- a/fv3/module_fcst_grid_comp.F90 +++ b/fv3/module_fcst_grid_comp.F90 @@ -575,7 +575,7 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc) character(8) :: bundle_grid - real(kind=8) :: mpi_wtime, timeis + real(kind=8) :: timeis type(ESMF_DELayout) :: delayout type(ESMF_DistGrid) :: distgrid @@ -765,6 +765,9 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc) Time_restart, num_restart_fh, & restart_fh) + ! Set IAU offset time + Atmos%iau_offset = iau_offset + !------ initialize component models ------ call atmos_model_init (Atmos, Time_init, Time, Time_step) @@ -1320,7 +1323,7 @@ subroutine fcst_run_phase_1(fcst_comp, importState, exportState,clock,rc) integer,save :: dt_cap=0 type(ESMF_Time) :: currTime,stopTime integer :: seconds - real(kind=8) :: mpi_wtime, tbeg1 + real(kind=8) :: tbeg1 ! !----------------------------------------------------------------------- !*********************************************************************** @@ -1387,7 +1390,7 @@ subroutine fcst_run_phase_2(fcst_comp, importState, exportState,clock,rc) integer :: date(6), seconds character(len=64) :: timestamp integer :: unit - real(kind=8) :: mpi_wtime, tbeg1 + real(kind=8) :: tbeg1 ! integer :: FBCount, i logical :: isPresent @@ -1495,7 +1498,7 @@ subroutine fcst_finalize(fcst_comp, importState, exportState,clock,rc) ! integer :: unit integer,dimension(6) :: date - real(kind=8) :: mpi_wtime, tbeg1 + real(kind=8) :: tbeg1 ! 
!----------------------------------------------------------------------- !*********************************************************************** diff --git a/fv3/io/fv3atm_clm_lake_io.F90 b/io/fv3atm_clm_lake_io.F90 similarity index 100% rename from fv3/io/fv3atm_clm_lake_io.F90 rename to io/fv3atm_clm_lake_io.F90 diff --git a/fv3/io/fv3atm_common_io.F90 b/io/fv3atm_common_io.F90 similarity index 100% rename from fv3/io/fv3atm_common_io.F90 rename to io/fv3atm_common_io.F90 diff --git a/fv3/io/fv3atm_history_io.F90 b/io/fv3atm_history_io.F90 similarity index 100% rename from fv3/io/fv3atm_history_io.F90 rename to io/fv3atm_history_io.F90 diff --git a/fv3/io/fv3atm_oro_io.F90 b/io/fv3atm_oro_io.F90 similarity index 100% rename from fv3/io/fv3atm_oro_io.F90 rename to io/fv3atm_oro_io.F90 diff --git a/fv3/io/fv3atm_restart_io.F90 b/io/fv3atm_restart_io.F90 similarity index 99% rename from fv3/io/fv3atm_restart_io.F90 rename to io/fv3atm_restart_io.F90 index 2b010c938e..7f5613044a 100644 --- a/fv3/io/fv3atm_restart_io.F90 +++ b/io/fv3atm_restart_io.F90 @@ -703,6 +703,7 @@ subroutine sfc_prop_restart_read (Sfcprop, Atm_block, Model, fv_domain, warm_sta if (global_att_exists(Sfc_restart, "file_version")) then call get_global_attribute(Sfc_restart, "file_version", file_ver) + Model%sfc_file_version = file_ver if (file_ver == "V2") then sfc%is_v2_file=.true. 
endif @@ -1208,6 +1209,14 @@ subroutine fv_sfc_restart_bundle_setup(bundle, grid, Model, rc) call rrfs_sd_quilt%bundle_fields(bundle, grid, Model, outputfile) endif + if (trim(Model%sfc_file_version) /= "V1") then + call ESMF_AttributeAdd(bundle, convention="NetCDF", purpose="FV3", attrList=(/"file_version"/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_AttributeSet(bundle, convention="NetCDF", purpose="FV3", name="file_version", value=trim(Model%sfc_file_version), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + end if + end subroutine fv_sfc_restart_bundle_setup !%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% diff --git a/fv3/io/fv3atm_rrfs_sd_io.F90 b/io/fv3atm_rrfs_sd_io.F90 similarity index 100% rename from fv3/io/fv3atm_rrfs_sd_io.F90 rename to io/fv3atm_rrfs_sd_io.F90 diff --git a/fv3/io/fv3atm_sfc_io.F90 b/io/fv3atm_sfc_io.F90 similarity index 99% rename from fv3/io/fv3atm_sfc_io.F90 rename to io/fv3atm_sfc_io.F90 index 0d1e1193a1..00431e1d03 100644 --- a/fv3/io/fv3atm_sfc_io.F90 +++ b/io/fv3atm_sfc_io.F90 @@ -10,7 +10,7 @@ module fv3atm_sfc_io register_axis, register_restart_field, & register_variable_attribute, register_field, & get_global_io_domain_indices, variable_exists, & - get_dimension_size + get_dimension_size, register_global_attribute use fv3atm_common_io, only: GFS_Data_transfer, axis_type, & create_2d_field_and_add_to_bundle, create_3d_field_and_add_to_bundle use GFS_typedefs, only: GFS_sfcprop_type, GFS_control_type, kind_phys @@ -367,6 +367,10 @@ subroutine Sfc_io_write_axes(sfc, Model, Sfc_restart) call register_field(Sfc_restart, 'Time', axis_type, (/'Time'/)) call register_variable_attribute(Sfc_restart, 'Time', 'cartesian_axis', 'T', str_len=1) call write_data( Sfc_restart, 'Time', 1) + + if (trim(Model%sfc_file_version) /= "V1") then + call 
register_global_attribute(Sfc_restart, "file_version", trim(Model%sfc_file_version), len(trim(Model%sfc_file_version))) + end if end subroutine Sfc_io_write_axes !>@ Fills the name3d array with all surface 3D field names. diff --git a/fv3/io/module_fv3_io_def.F90 b/io/module_fv3_io_def.F90 similarity index 100% rename from fv3/io/module_fv3_io_def.F90 rename to io/module_fv3_io_def.F90 diff --git a/fv3/io/module_write_internal_state.F90 b/io/module_write_internal_state.F90 similarity index 100% rename from fv3/io/module_write_internal_state.F90 rename to io/module_write_internal_state.F90 diff --git a/fv3/io/module_write_netcdf.F90 b/io/module_write_netcdf.F90 similarity index 100% rename from fv3/io/module_write_netcdf.F90 rename to io/module_write_netcdf.F90 diff --git a/fv3/io/module_write_restart_netcdf.F90 b/io/module_write_restart_netcdf.F90 similarity index 89% rename from fv3/io/module_write_restart_netcdf.F90 rename to io/module_write_restart_netcdf.F90 index 3e5d2dd550..38d852e6f0 100644 --- a/fv3/io/module_write_restart_netcdf.F90 +++ b/io/module_write_restart_netcdf.F90 @@ -431,6 +431,8 @@ subroutine write_restart_netcdf(wrtfb, filename, & ncerr = nf90_put_att(ncid, NF90_GLOBAL, "NumFilesInSet", 1); NC_ERR_STOP(ncerr) + call get_global_attr(wrtfb, ncid, mype, rc) + ! end of define mode ncerr = nf90_enddef(ncid); NC_ERR_STOP(ncerr) @@ -664,5 +666,83 @@ end subroutine write_out_ungridded_dim_atts_from_field end subroutine write_restart_netcdf + !> Get global attribute. + !> + !> @param[in] fldbundle ESMF field bundle. + !> @param[in] ncid NetCDF file ID. + !> @param[in] mype MPI rank. + !> @param[out] rc Return code - 0 for success, ESMF error code otherwise. + !> + !> @author Dusan Jovic @date Nov 1, 2017 + subroutine get_global_attr(fldbundle, ncid, mype, rc) + type(ESMF_FieldBundle), intent(in) :: fldbundle + integer, intent(in) :: ncid + integer, intent(in) :: mype + integer, intent(out) :: rc + +! 
local variable + integer :: i, attCount + integer :: ncerr + character(len=ESMF_MAXSTR) :: attName + type(ESMF_TypeKind_Flag) :: typekind + + integer(ESMF_KIND_I4) :: varival_i4 + integer(ESMF_KIND_I8) :: varival_i8 + real(ESMF_KIND_R4), dimension(:), allocatable :: varr4list + real(ESMF_KIND_R8), dimension(:), allocatable :: varr8list + integer :: itemCount + character(len=ESMF_MAXSTR) :: varcval +! + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, count=attCount, & + rc=rc); ESMF_ERR_RETURN(rc) + + do i=1,attCount + + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, attributeIndex=i, name=attName, & + typekind=typekind, itemCount=itemCount, rc=rc); ESMF_ERR_RETURN(rc) + + if(trim(attName) == 'grid_id') cycle ! Skip grid_id + + if (typekind == ESMF_TYPEKIND_I4) then + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attname), value=varival_i4, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, nf90_global, trim(attname), varival_i4); NC_ERR_STOP(ncerr) + + else if (typekind == ESMF_TYPEKIND_I8) then + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attname), value=varival_i8, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, nf90_global, trim(attname), varival_i8); NC_ERR_STOP(ncerr) + + else if (typekind == ESMF_TYPEKIND_R4) then + allocate (varr4list(itemCount)) + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attName), valueList=varr4list, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), varr4list); NC_ERR_STOP(ncerr) + deallocate(varr4list) + + else if (typekind == ESMF_TYPEKIND_R8) then + allocate (varr8list(itemCount)) + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attName), valueList=varr8list, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, 
NF90_GLOBAL, trim(attName), varr8list); NC_ERR_STOP(ncerr) + deallocate(varr8list) + + else if (typekind == ESMF_TYPEKIND_CHARACTER) then + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varcval, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), trim(varcval)); NC_ERR_STOP(ncerr) + + else + + if (mype == 0) write(0,*)'Unsupported typekind ', typekind + call ESMF_Finalize(endflag=ESMF_END_ABORT) + end if + + end do + + end subroutine get_global_attr + !---------------------------------------------------------------------------------------- end module module_write_restart_netcdf diff --git a/fv3/io/module_wrt_grid_comp.F90 b/io/module_wrt_grid_comp.F90 similarity index 54% rename from fv3/io/module_wrt_grid_comp.F90 rename to io/module_wrt_grid_comp.F90 index eaf7fb529c..42ba407d21 100644 --- a/fv3/io/module_wrt_grid_comp.F90 +++ b/io/module_wrt_grid_comp.F90 @@ -1,6 +1,6 @@ !----------------------------------------------------------------------- ! - module module_wrt_grid_comp + module module_wrt_grid_comp ! !----------------------------------------------------------------------- !*** This module includes the functionality of write gridded component. @@ -26,239 +26,241 @@ module module_wrt_grid_comp ! !--------------------------------------------------------------------------------- ! 
- use mpi_f08 - use esmf - use netcdf - - use fms, only : fms_init, fms_end, fms_mpp_uppercase, fms_mpp_error, FATAL - use fms, only : NO_CALENDAR, JULIAN, GREGORIAN, THIRTY_DAY_MONTHS, NOLEAP - - use write_internal_state - use module_fv3_io_def, only : num_pes_fcst, & - n_group, num_files, & - fv3atm_output_dir, & - filename_base, output_grid, output_file, & - imo,jmo,ichunk2d,jchunk2d, & - ichunk3d,jchunk3d,kchunk3d, & - quantize_mode,quantize_nsd, & - cen_lon, cen_lat, & - lon1, lat1, lon2, lat2, dlon, dlat, & - stdlat1, stdlat2, dx, dy, iau_offset, & - ideflate, zstandard_level, lflname_fulltime - use module_write_netcdf, only : write_netcdf - use module_write_restart_netcdf, only : write_restart_netcdf - use physcons, only : pi => con_pi + use mpi_f08 + use esmf + use netcdf + + use fms, only : fms_init, fms_end, fms_mpp_uppercase, fms_mpp_error, FATAL + use fms, only : NO_CALENDAR, JULIAN, GREGORIAN, THIRTY_DAY_MONTHS, NOLEAP + + use write_internal_state + use module_fv3_io_def, only : num_pes_fcst, & + n_group, num_files, & + fv3atm_output_dir, & + filename_base, output_grid, output_file, & + imo,jmo,ichunk2d,jchunk2d, & + ichunk3d,jchunk3d,kchunk3d, & + quantize_mode,quantize_nsd, & + cen_lon, cen_lat, & + lon1, lat1, lon2, lat2, dlon, dlat, & + stdlat1, stdlat2, dx, dy, iau_offset, & + ideflate, zstandard_level, lflname_fulltime + use module_write_netcdf, only : write_netcdf + use module_write_restart_netcdf, only : write_restart_netcdf + use physcons, only : pi => con_pi #ifdef INLINE_POST - use post_fv3, only : post_run_fv3 + use post_fv3, only : post_run_fv3 #endif ! !----------------------------------------------------------------------- ! - implicit none + implicit none ! !----------------------------------------------------------------------- - private - public get_outfile, lambert, rtll + private + public get_outfile, lambert, rtll + public generate_dst_field_mask, add_dst_mask ! 
!----------------------------------------------------------------------- ! ! - integer,save :: lead_write_task !<-- Rank of the first write task in the write group - integer,save :: last_write_task !<-- Rank of the last write task in the write group - integer,save :: ntasks !<-- # of write tasks in the current group - integer,save :: itasks, jtasks !<-- # of write tasks in i/j direction in the current group - integer,save :: ngrids + integer,save :: lead_write_task !<-- Rank of the first write task in the write group + integer,save :: last_write_task !<-- Rank of the last write task in the write group + integer,save :: ntasks !<-- # of write tasks in the current group + integer,save :: itasks, jtasks !<-- # of write tasks in i/j direction in the current group + integer,save :: ngrids - type(MPI_Comm),save :: wrt_mpi_comm !<-- the mpi communicator in the write comp - integer,save :: idate(7), start_time(7) - logical,save :: write_nsflip - logical,save :: change_wrtidate=.false. - integer,save :: frestart(999) = -1 - integer,save :: calendar_type = 3 - logical :: lprnt + type(MPI_Comm),save :: wrt_mpi_comm !<-- the mpi communicator in the write comp + integer,save :: idate(7), start_time(7) + logical,save :: write_nsflip + logical,save :: change_wrtidate=.false. + integer,save :: frestart(999) = -1 + integer,save :: calendar_type = 3 + logical :: lprnt ! !----------------------------------------------------------------------- ! - type(ESMF_FieldBundle) :: gridFB - integer :: FBCount - character(len=esmf_maxstr),allocatable :: fcstItemNameList(:) - character(128) :: FBlist_outfilename(100) - logical :: top_parent_is_global + type(ESMF_FieldBundle) :: gridFB + integer :: FBCount + character(len=esmf_maxstr),allocatable :: fcstItemNameList(:) + character(128) :: FBlist_outfilename(100) + logical :: top_parent_is_global ! 
!----------------------------------------------------------------------- - REAL(KIND=8) :: btim,btim0 - REAL(KIND=8),PUBLIC,SAVE :: write_init_tim, write_run_tim - REAL(KIND=8), parameter :: radi=180.0d0/pi + REAL(KIND=8) :: btim,btim0 + REAL(KIND=8),PUBLIC,SAVE :: write_init_tim, write_run_tim + REAL(KIND=8), parameter :: radi=180.0d0/pi !----------------------------------------------------------------------- ! - public SetServices + integer, parameter, public :: dstOutsideMaskValue = 100 + public SetServices ! - interface splat - module procedure splat4 - module procedure splat8 - end interface splat + interface splat + module procedure splat4 + module procedure splat8 + end interface splat ! - type optimizeT - type(ESMF_State) :: state - type(ESMF_GridComp), allocatable :: comps(:) - end type + type optimizeT + type(ESMF_State) :: state + type(ESMF_GridComp), allocatable :: comps(:) + end type - contains + contains ! !----------------------------------------------------------------------- !####################################################################### !----------------------------------------------------------------------- ! 
- subroutine SetServices(wrt_comp, rc) - type(ESMF_GridComp) :: wrt_comp - integer, intent(out) :: rc + subroutine SetServices(wrt_comp, rc) + type(ESMF_GridComp) :: wrt_comp + integer, intent(out) :: rc - rc = ESMF_SUCCESS + rc = ESMF_SUCCESS - call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_INITIALIZE, phase=1, & - userRoutine=wrt_initialize_p1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_INITIALIZE, phase=1, & + userRoutine=wrt_initialize_p1, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_INITIALIZE, phase=2, & - userRoutine=wrt_initialize_p2, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_INITIALIZE, phase=2, & + userRoutine=wrt_initialize_p2, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_INITIALIZE, phase=3, & - userRoutine=wrt_initialize_p3, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_INITIALIZE, phase=3, & + userRoutine=wrt_initialize_p3, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_RUN, & - userRoutine=wrt_run, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_RUN, & + userRoutine=wrt_run, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call 
ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_FINALIZE, & - userRoutine=wrt_finalize, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_FINALIZE, & + userRoutine=wrt_finalize, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - end subroutine SetServices + end subroutine SetServices ! !----------------------------------------------------------------------- !####################################################################### !----------------------------------------------------------------------- ! - subroutine wrt_initialize_p1(wrt_comp, imp_state_write, exp_state_write, clock, rc) + subroutine wrt_initialize_p1(wrt_comp, imp_state_write, exp_state_write, clock, rc) ! !----------------------------------------------------------------------- !*** INITIALIZE THE WRITE GRIDDED COMPONENT. !----------------------------------------------------------------------- ! - type(esmf_GridComp) :: wrt_comp - type(ESMF_State) :: imp_state_write, exp_state_write - type(esmf_Clock) :: clock - integer,intent(out) :: rc + type(esmf_GridComp) :: wrt_comp + type(ESMF_State) :: imp_state_write, exp_state_write + type(esmf_Clock) :: clock + integer,intent(out) :: rc ! !*** LOCAL VARIABLES ! - TYPE(ESMF_VM) :: VM - type(write_wrap) :: WRAP - type(wrt_internal_state),pointer :: wrt_int_state - - integer :: tl, i, j, n, k - integer,dimension(2,6) :: decomptile - integer,dimension(2) :: regDecomp !define delayout for the nest grid - integer :: fieldCount - type(MPI_Comm) :: vm_mpi_comm - character(40) :: fieldName - type(ESMF_Config) :: cf, cf_output_grid - type(ESMF_Info) :: info - type(ESMF_DELayout) :: delayout - type(ESMF_Grid) :: fcstGrid - type(ESMF_Grid), allocatable :: wrtGrid(:) - type(ESMF_Grid) :: wrtGrid_cubed_sphere - logical :: create_wrtGrid_cubed_sphere = .true. 
- type(ESMF_Grid) :: actualWrtGrid - type(ESMF_Array) :: array - type(ESMF_Field) :: field_work, field - type(ESMF_Decomp_Flag) :: decompflagPTile(2,6) - - type(ESMF_StateItem_Flag), allocatable :: fcstItemTypeList(:) - type(ESMF_FieldBundle) :: fcstFB, fieldbundle, mirrorFB - type(ESMF_Field), allocatable :: fcstField(:) - type(ESMF_TypeKind_Flag) :: typekind - character(len=80), allocatable :: fieldnamelist(:) - integer :: fieldDimCount, gridDimCount, tk, sloc - integer, allocatable :: petMap(:) - integer, allocatable :: gridToFieldMap(:) - integer, allocatable :: ungriddedLBound(:) - integer, allocatable :: ungriddedUBound(:) - type(ESMF_StaggerLoc) :: staggerloc - character(len=80) :: attName - character(len=80), allocatable :: attNameList(:),attNameList2(:) - type(ESMF_TypeKind_Flag), allocatable :: typekindList(:) - character(len=80) :: valueS - integer :: valueI4 - real(ESMF_KIND_R4) :: valueR4 - real(ESMF_KIND_R8) :: valueR8 - logical, allocatable :: is_moving(:) - logical :: isPresent - integer :: minIndex(2), maxIndex(2) - - integer :: attCount, jidx, idx, noutfile - character(19) :: newdate - character(128) :: outfile_name - character(128),dimension(:,:), allocatable :: outfilename - real(8), dimension(:), allocatable :: slat - real(8), dimension(:), allocatable :: lat, lon - real(ESMF_KIND_R8), dimension(:,:), pointer :: lonPtr, latPtr - real(ESMF_KIND_R8) :: rot_lon, rot_lat - real(ESMF_KIND_R8) :: geo_lon, geo_lat - real(ESMF_KIND_R8) :: lon1_r8, lat1_r8 - real(ESMF_KIND_R8) :: x1, y1, x, y, delat, delon - type(ESMF_TimeInterval) :: IAU_offsetTI - - character(256) :: cf_open, cf_close - character(256) :: gridfile - integer :: num_output_file - - type(ESMF_DistGrid) :: acceptorDG, newAcceptorDG - integer :: grid_id - - logical :: history_file_on_native_grid -! 
- character(ESMF_MAXSTR) :: fb_name1, fb_name2 + TYPE(ESMF_VM) :: VM + type(write_wrap) :: WRAP + type(wrt_internal_state),pointer :: wrt_int_state + + integer :: tl, i, j, n, k + integer,dimension(2,6) :: decomptile + integer,dimension(2) :: regDecomp !define delayout for the nest grid + integer :: fieldCount + type(MPI_Comm) :: vm_mpi_comm + character(40) :: fieldName + type(ESMF_Config) :: cf, cf_output_grid + type(ESMF_Info) :: info + type(ESMF_DELayout) :: delayout + type(ESMF_Grid) :: fcstGrid + type(ESMF_Grid), allocatable :: wrtGrid(:) + type(ESMF_Grid) :: wrtGrid_cubed_sphere + logical :: create_wrtGrid_cubed_sphere = .true. + type(ESMF_Grid) :: actualWrtGrid + type(ESMF_Array) :: array + type(ESMF_Field) :: field_work, field + type(ESMF_Decomp_Flag) :: decompflagPTile(2,6) + + type(ESMF_StateItem_Flag), allocatable :: fcstItemTypeList(:) + type(ESMF_FieldBundle) :: fcstFB, fieldbundle, mirrorFB + type(ESMF_Field), allocatable :: fcstField(:) + type(ESMF_TypeKind_Flag) :: typekind + character(len=80), allocatable :: fieldnamelist(:) + integer :: fieldDimCount, gridDimCount, tk, sloc + integer, allocatable :: petMap(:) + integer, allocatable :: gridToFieldMap(:) + integer, allocatable :: ungriddedLBound(:) + integer, allocatable :: ungriddedUBound(:) + type(ESMF_StaggerLoc) :: staggerloc + character(len=80) :: attName + character(len=80), allocatable :: attNameList(:),attNameList2(:) + type(ESMF_TypeKind_Flag), allocatable :: typekindList(:) + character(len=80) :: valueS + integer :: valueI4 + real(ESMF_KIND_R4) :: valueR4 + real(ESMF_KIND_R8) :: valueR8 + logical, allocatable :: is_moving(:) + logical :: isPresent + integer :: minIndex(2), maxIndex(2) + + integer :: attCount, jidx, idx, noutfile + character(19) :: newdate + character(128) :: outfile_name + character(128),dimension(:,:), allocatable :: outfilename + real(8), dimension(:), allocatable :: slat + real(8), dimension(:), allocatable :: lat, lon + real(ESMF_KIND_R8), dimension(:,:), pointer :: 
lonPtr, latPtr + real(ESMF_KIND_R8) :: rot_lon, rot_lat + real(ESMF_KIND_R8) :: geo_lon, geo_lat + real(ESMF_KIND_R8) :: lon1_r8, lat1_r8 + real(ESMF_KIND_R8) :: x1, y1, x, y, delat, delon + type(ESMF_TimeInterval) :: IAU_offsetTI + + character(256) :: cf_open, cf_close + character(256) :: gridfile + integer :: num_output_file + + type(ESMF_DistGrid) :: acceptorDG, newAcceptorDG + integer :: grid_id + + logical :: history_file_on_native_grid +! + character(ESMF_MAXSTR) :: fb_name1, fb_name2 !----------------------------------------------------------------------- !*********************************************************************** !----------------------------------------------------------------------- ! - rc = ESMF_SUCCESS + rc = ESMF_SUCCESS ! !----------------------------------------------------------------------- !*** initialize the write component timers. !----------------------------------------------------------------------- ! - write_init_tim = 0. - write_run_tim = 0. - btim0 = MPI_Wtime() + write_init_tim = 0. + write_run_tim = 0. + btim0 = MPI_Wtime() ! !----------------------------------------------------------------------- !*** set the write component's internal state. !----------------------------------------------------------------------- ! - allocate(wrt_int_state,stat=RC) - wrap%write_int_state => wrt_int_state - call ESMF_GridCompSetInternalState(wrt_comp, wrap, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + allocate(wrt_int_state,stat=RC) + wrap%write_int_state => wrt_int_state + call ESMF_GridCompSetInternalState(wrt_comp, wrap, rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! 
- call ESMF_VMGetCurrent(vm=VM,rc=RC) - call ESMF_VMGet(vm=VM, localPet=wrt_int_state%mype, & - petCount=wrt_int_state%petcount,mpiCommunicator=vm_mpi_comm%mpi_val,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_VMGetCurrent(vm=VM,rc=RC) + call ESMF_VMGet(vm=VM, localPet=wrt_int_state%mype, & + petCount=wrt_int_state%petcount,mpiCommunicator=vm_mpi_comm%mpi_val,rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call mpi_comm_dup(vm_mpi_comm, wrt_mpi_comm, rc) + call mpi_comm_dup(vm_mpi_comm, wrt_mpi_comm, rc) - ntasks = wrt_int_state%petcount - jidx = wrt_int_state%petcount/6 - lead_write_task = 0 - last_write_task = ntasks -1 - lprnt = lead_write_task == wrt_int_state%mype + ntasks = wrt_int_state%petcount + jidx = wrt_int_state%petcount/6 + lead_write_task = 0 + last_write_task = ntasks -1 + lprnt = lead_write_task == wrt_int_state%mype - call fms_init(wrt_mpi_comm%mpi_val) + call fms_init(wrt_mpi_comm%mpi_val) ! print *,'in wrt, lead_write_task=', & ! lead_write_task,'last_write_task=',last_write_task, & @@ -269,1308 +271,1315 @@ subroutine wrt_initialize_p1(wrt_comp, imp_state_write, exp_state_write, clock, !*** get configuration variables !----------------------------------------------------------------------- ! - call ESMF_GridCompGet(gridcomp=wrt_comp,config=CF,rc=RC) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return ! bail out + call ESMF_GridCompGet(gridcomp=wrt_comp,config=CF,rc=RC) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return ! bail out ! 
variables for post - call ESMF_ConfigGetAttribute(config=CF,value=wrt_int_state%output_history,default=.true., & - label='output_history:',rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return - call ESMF_ConfigGetAttribute(config=CF,value=wrt_int_state%write_dopost,default=.false., & - label='write_dopost:',rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return - - call ESMF_ConfigGetAttribute(config=CF,value=write_nsflip,default=.false., & - label='write_nsflip:',rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return - - call ESMF_ConfigGetAttribute(config=CF,value=fv3atm_output_dir, & - label ='fv3atm_output_dir:', default='./', rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_ConfigGetAttribute(config=CF,value=wrt_int_state%output_history,default=.true., & + label='output_history:',rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return + call ESMF_ConfigGetAttribute(config=CF,value=wrt_int_state%write_dopost,default=.false., & + label='write_dopost:',rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return - ! 
Make sure fv3atm_output_dir ends with '/' - if (fv3atm_output_dir(len(trim(fv3atm_output_dir)):len(trim(fv3atm_output_dir))) /= '/') then - fv3atm_output_dir = trim(fv3atm_output_dir) // '/' - end if + call ESMF_ConfigGetAttribute(config=CF,value=write_nsflip,default=.false., & + label='write_nsflip:',rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return + + call ESMF_ConfigGetAttribute(config=CF,value=fv3atm_output_dir, & + label ='fv3atm_output_dir:', default='./', rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! Make sure fv3atm_output_dir ends with '/' + if (fv3atm_output_dir(len(trim(fv3atm_output_dir)):len(trim(fv3atm_output_dir))) /= '/') then + fv3atm_output_dir = trim(fv3atm_output_dir) // '/' + end if - if( wrt_int_state%write_dopost ) then + if( wrt_int_state%write_dopost ) then #ifdef INLINE_POST - call ESMF_ConfigGetAttribute(config=CF,value=wrt_int_state%post_namelist,default='itag', & - label ='post_namelist:',rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + call ESMF_ConfigGetAttribute(config=CF,value=wrt_int_state%post_namelist,default='itag', & + label ='post_namelist:',rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return #else - rc = ESMF_RC_NOT_IMPL - print *,'inline post not available on this machine' - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + rc = ESMF_RC_NOT_IMPL + print *,'inline post not available on this machine' + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return #endif - endif - - allocate(output_file(num_files)) - num_output_file = ESMF_ConfigGetLen(config=CF, label ='output_file:',rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, 
file=__FILE__)) return - if (num_files == num_output_file) then - call ESMF_ConfigGetAttribute(CF,valueList=output_file,label='output_file:', & - count=num_files, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - do i = 1, num_files - if(output_file(i) /= "netcdf" .and. output_file(i) /= "netcdf_parallel") then - write(0,*)"Only netcdf and netcdf_parallel are allowed for multiple values of output_file" - call ESMF_Finalize(endflag=ESMF_END_ABORT) - endif - enddo - else if ( num_output_file == 1) then - call ESMF_ConfigGetAttribute(CF,valuelist=output_file,label='output_file:', count=1, rc=rc) - output_file(1:num_files) = output_file(1) - else - output_file(1:num_files) = 'netcdf' - endif - if(lprnt) then - print *,'num_files=',num_files - do i=1,num_files - print *,'num_file=',i,'filename_base= ',trim(filename_base(i)),' output_file= ',trim(output_file(i)) - enddo - endif + endif - call ESMF_InfoGetFromHost(imp_state_write, info=info, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoGetAlloc(info, key="is_moving", values=is_moving, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + allocate(output_file(num_files)) + num_output_file = ESMF_ConfigGetLen(config=CF, label ='output_file:',rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (num_files == num_output_file) then + call ESMF_ConfigGetAttribute(CF,valueList=output_file,label='output_file:', & + count=num_files, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + do i = 1, num_files + if(output_file(i) /= "netcdf" .and. 
output_file(i) /= "netcdf_parallel") then + write(0,*)"Only netcdf and netcdf_parallel are allowed for multiple values of output_file" + call ESMF_Finalize(endflag=ESMF_END_ABORT) + endif + enddo + else if ( num_output_file == 1) then + call ESMF_ConfigGetAttribute(CF,valuelist=output_file,label='output_file:', count=1, rc=rc) + output_file(1:num_files) = output_file(1) + else + output_file(1:num_files) = 'netcdf' + endif + if(lprnt) then + print *,'num_files=',num_files + do i=1,num_files + print *,'num_file=',i,'filename_base= ',trim(filename_base(i)),' output_file= ',trim(output_file(i)) + enddo + endif - call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & - name="ngrids", value=ngrids, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGetFromHost(imp_state_write, info=info, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGetAlloc(info, key="is_moving", values=is_moving, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & - name="top_parent_is_global", value=top_parent_is_global, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & + name="ngrids", value=ngrids, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(wrtGrid(ngrids)) + call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & + name="top_parent_is_global", value=top_parent_is_global, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(output_grid(ngrids)) + allocate(wrtGrid(ngrids)) - 
allocate(imo(ngrids)) - allocate(jmo(ngrids)) + allocate(output_grid(ngrids)) - allocate(cen_lon(ngrids)) - allocate(cen_lat(ngrids)) - allocate(lon1(ngrids)) - allocate(lat1(ngrids)) - allocate(lon2(ngrids)) - allocate(lat2(ngrids)) - allocate(dlon(ngrids)) - allocate(dlat(ngrids)) + allocate(imo(ngrids)) + allocate(jmo(ngrids)) - allocate(stdlat1(ngrids)) - allocate(stdlat2(ngrids)) - allocate(dx(ngrids)) - allocate(dy(ngrids)) + allocate(cen_lon(ngrids)) + allocate(cen_lat(ngrids)) + allocate(lon1(ngrids)) + allocate(lat1(ngrids)) + allocate(lon2(ngrids)) + allocate(lat2(ngrids)) + allocate(dlon(ngrids)) + allocate(dlat(ngrids)) - allocate(ichunk2d(ngrids)) - allocate(jchunk2d(ngrids)) - allocate(ichunk3d(ngrids)) - allocate(jchunk3d(ngrids)) - allocate(kchunk3d(ngrids)) - allocate(ideflate(ngrids)) - allocate(quantize_mode(ngrids)) - allocate(quantize_nsd(ngrids)) - allocate(zstandard_level(ngrids)) + allocate(stdlat1(ngrids)) + allocate(stdlat2(ngrids)) + allocate(dx(ngrids)) + allocate(dy(ngrids)) - allocate(wrt_int_state%out_grid_info(ngrids)) + allocate(ichunk2d(ngrids)) + allocate(jchunk2d(ngrids)) + allocate(ichunk3d(ngrids)) + allocate(jchunk3d(ngrids)) + allocate(kchunk3d(ngrids)) + allocate(ideflate(ngrids)) + allocate(quantize_mode(ngrids)) + allocate(quantize_nsd(ngrids)) + allocate(zstandard_level(ngrids)) - do n=1, ngrids + allocate(wrt_int_state%out_grid_info(ngrids)) - if (n == 1) then - ! for top level domain look directly in cf - cf_output_grid = cf - else - ! 
for nest domains, look under specific section - write(cf_open,'("")') n - write(cf_close,'("")') n - cf_output_grid = ESMF_ConfigCreate(cf, openLabel=trim(cf_open), closeLabel=trim(cf_close), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - end if + do n=1, ngrids - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=output_grid(n), label ='output_grid:',rc=rc) - if (lprnt) then - print *,'grid_id= ', n, ' output_grid= ', trim(output_grid(n)) - end if + if (n == 1) then + ! for top level domain look directly in cf + cf_output_grid = cf + else + ! for nest domains, look under specific section + write(cf_open,'("")') n + write(cf_close,'("")') n + cf_output_grid = ESMF_ConfigCreate(cf, openLabel=trim(cf_open), closeLabel=trim(cf_close), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + end if + + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=output_grid(n), label ='output_grid:',rc=rc) + if (lprnt) then + print *,'grid_id= ', n, ' output_grid= ', trim(output_grid(n)) + end if + + if (trim(output_grid(n)) == 'cubed_sphere_grid' .and. wrt_int_state%write_dopost) then + write(0,*) 'wrt_initialize_p1: Inline post is not supported with cubed_sphere_grid outputs' + call ESMF_LogWrite("wrt_initialize_p1: Inline post is not supported with cubed_sphere_grid output",ESMF_LOGMSG_ERROR,rc=RC) + call ESMF_Finalize(endflag=ESMF_END_ABORT) + end if + + call ESMF_ConfigGetAttribute(config=CF, value=itasks,default=1,label ='itasks:',rc=rc) + jtasks = ntasks + if(itasks > 0 ) jtasks = ntasks/itasks + if( itasks*jtasks /= ntasks ) then + itasks = 1 + jtasks = ntasks + endif - if (trim(output_grid(n)) == 'cubed_sphere_grid' .and. 
wrt_int_state%write_dopost) then - write(0,*) 'wrt_initialize_p1: Inline post is not supported with cubed_sphere_grid outputs' - call ESMF_LogWrite("wrt_initialize_p1: Inline post is not supported with cubed_sphere_grid output",ESMF_LOGMSG_ERROR,rc=RC) + if (trim(output_grid(n)) == 'gaussian_grid' .or. trim(output_grid(n)) == 'global_latlon') then + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=imo(n), label ='imo:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=jmo(n), label ='jmo:',rc=rc) + if (lprnt) then + print *,'imo=',imo(n),'jmo=',jmo(n) + end if + else if (trim(output_grid(n)) == 'regional_latlon') then + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat1(n), label ='lat1:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon2(n), label ='lon2:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat2(n), label ='lat2:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlon(n), label ='dlon:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlat(n), label ='dlat:',rc=rc) + imo(n) = (lon2(n)-lon1(n))/dlon(n) + 1 + jmo(n) = (lat2(n)-lat1(n))/dlat(n) + 1 + if (lprnt) then + print *,'lon1=',lon1(n),' lat1=',lat1(n) + print *,'lon2=',lon2(n),' lat2=',lat2(n) + print *,'dlon=',dlon(n),' dlat=',dlat(n) + print *,'imo =',imo(n), ' jmo =',jmo(n) + end if + else if (trim(output_grid(n)) == 'rotated_latlon') then + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lon(n), label ='cen_lon:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lat(n), label ='cen_lat:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat1(n), label ='lat1:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon2(n), label ='lon2:', 
rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat2(n), label ='lat2:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlon(n), label ='dlon:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlat(n), label ='dlat:', rc=rc) + imo(n) = (lon2(n)-lon1(n))/dlon(n) + 1 + jmo(n) = (lat2(n)-lat1(n))/dlat(n) + 1 + if (lprnt) then + print *,'cen_lon=',cen_lon(n),' cen_lat=',cen_lat(n) + print *,'lon1 =',lon1(n), ' lat1 =',lat1(n) + print *,'lon2 =',lon2(n), ' lat2 =',lat2(n) + print *,'dlon =',dlon(n), ' dlat =',dlat(n) + print *,'imo =',imo(n), ' jmo =',jmo(n) + end if + else if (trim(output_grid(n)) == 'rotated_latlon_moving' .or. & + trim(output_grid(n)) == 'regional_latlon_moving') then + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=imo(n), label ='imo:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=jmo(n), label ='jmo:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlon(n), label ='dlon:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlat(n), label ='dlat:',rc=rc) + if (lprnt) then + print *,'imo =',imo(n), ' jmo =',jmo(n) + print *,'dlon=',dlon(n),' dlat=',dlat(n) + end if + else if (trim(output_grid(n)) == 'lambert_conformal') then + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lon(n), label ='cen_lon:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lat(n), label ='cen_lat:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=stdlat1(n), label ='stdlat1:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=stdlat2(n), label ='stdlat2:',rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=imo(n), label ='nx:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=jmo(n), label ='ny:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, 
value=lat1(n), label ='lat1:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dx(n), label ='dx:', rc=rc) + call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dy(n), label ='dy:', rc=rc) + if (lprnt) then + print *,'cen_lon=',cen_lon(n),' cen_lat=',cen_lat(n) + print *,'stdlat1=',stdlat1(n),' stdlat2=',stdlat2(n) + print *,'lon1=',lon1(n),' lat1=',lat1(n) + print *,'nx=',imo(n), ' ny=',jmo(n) + print *,'dx=',dx(n),' dy=',dy(n) + endif + endif ! output_grid + + ! chunksizes for netcdf_parallel + call ESMF_ConfigGetAttribute(config=CF,value=ichunk2d(n),default=0,label ='ichunk2d:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=jchunk2d(n),default=0,label ='jchunk2d:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=ichunk3d(n),default=0,label ='ichunk3d:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=jchunk3d(n),default=0,label ='jchunk3d:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=kchunk3d(n),default=0,label ='kchunk3d:',rc=rc) + + ! zstandard compression flag + call ESMF_ConfigGetAttribute(config=CF,value=zstandard_level(n),default=0,label ='zstandard_level:',rc=rc) + if (zstandard_level(n) < 0) zstandard_level(n)=0 + + ! zlib compression flag + call ESMF_ConfigGetAttribute(config=CF,value=ideflate(n),default=0,label ='ideflate:',rc=rc) + if (ideflate(n) < 0) ideflate(n)=0 + + if (ideflate(n) > 0 .and. zstandard_level(n) > 0) then + write(0,*)"wrt_initialize_p1: zlib and zstd compression cannot be both enabled at the same time" + call ESMF_LogWrite("wrt_initialize_p1: zlib and zstd compression cannot be both enabled at the same time",ESMF_LOGMSG_ERROR,rc=RC) call ESMF_Finalize(endflag=ESMF_END_ABORT) - end if - - call ESMF_ConfigGetAttribute(config=CF, value=itasks,default=1,label ='itasks:',rc=rc) - jtasks = ntasks - if(itasks > 0 ) jtasks = ntasks/itasks - if( itasks*jtasks /= ntasks ) then - itasks = 1 - jtasks = ntasks - endif + end if - if (trim(output_grid(n)) == 'gaussian_grid' .or. 
trim(output_grid(n)) == 'global_latlon') then - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=imo(n), label ='imo:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=jmo(n), label ='jmo:',rc=rc) - if (lprnt) then - print *,'imo=',imo(n),'jmo=',jmo(n) - end if - else if (trim(output_grid(n)) == 'regional_latlon') then - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat1(n), label ='lat1:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon2(n), label ='lon2:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat2(n), label ='lat2:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlon(n), label ='dlon:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlat(n), label ='dlat:',rc=rc) - imo(n) = (lon2(n)-lon1(n))/dlon(n) + 1 - jmo(n) = (lat2(n)-lat1(n))/dlat(n) + 1 - if (lprnt) then - print *,'lon1=',lon1(n),' lat1=',lat1(n) - print *,'lon2=',lon2(n),' lat2=',lat2(n) - print *,'dlon=',dlon(n),' dlat=',dlat(n) - print *,'imo =',imo(n), ' jmo =',jmo(n) - end if - else if (trim(output_grid(n)) == 'rotated_latlon') then - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lon(n), label ='cen_lon:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lat(n), label ='cen_lat:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat1(n), label ='lat1:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon2(n), label ='lon2:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat2(n), label ='lat2:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlon(n), label ='dlon:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlat(n), label ='dlat:', rc=rc) - imo(n) = 
(lon2(n)-lon1(n))/dlon(n) + 1 - jmo(n) = (lat2(n)-lat1(n))/dlat(n) + 1 - if (lprnt) then - print *,'cen_lon=',cen_lon(n),' cen_lat=',cen_lat(n) - print *,'lon1 =',lon1(n), ' lat1 =',lat1(n) - print *,'lon2 =',lon2(n), ' lat2 =',lat2(n) - print *,'dlon =',dlon(n), ' dlat =',dlat(n) - print *,'imo =',imo(n), ' jmo =',jmo(n) - end if - else if (trim(output_grid(n)) == 'rotated_latlon_moving' .or. & - trim(output_grid(n)) == 'regional_latlon_moving') then - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=imo(n), label ='imo:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=jmo(n), label ='jmo:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlon(n), label ='dlon:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlat(n), label ='dlat:',rc=rc) - if (lprnt) then - print *,'imo =',imo(n), ' jmo =',jmo(n) - print *,'dlon=',dlon(n),' dlat=',dlat(n) - end if - else if (trim(output_grid(n)) == 'lambert_conformal') then - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lon(n), label ='cen_lon:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lat(n), label ='cen_lat:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=stdlat1(n), label ='stdlat1:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=stdlat2(n), label ='stdlat2:',rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=imo(n), label ='nx:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=jmo(n), label ='ny:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat1(n), label ='lat1:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dx(n), label ='dx:', rc=rc) - call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dy(n), label ='dy:', rc=rc) - if (lprnt) then - print *,'cen_lon=',cen_lon(n),' cen_lat=',cen_lat(n) - print 
*,'stdlat1=',stdlat1(n),' stdlat2=',stdlat2(n) - print *,'lon1=',lon1(n),' lat1=',lat1(n) - print *,'nx=',imo(n), ' ny=',jmo(n) - print *,'dx=',dx(n),' dy=',dy(n) - endif - endif ! output_grid - - ! chunksizes for netcdf_parallel - call ESMF_ConfigGetAttribute(config=CF,value=ichunk2d(n),default=0,label ='ichunk2d:',rc=rc) - call ESMF_ConfigGetAttribute(config=CF,value=jchunk2d(n),default=0,label ='jchunk2d:',rc=rc) - call ESMF_ConfigGetAttribute(config=CF,value=ichunk3d(n),default=0,label ='ichunk3d:',rc=rc) - call ESMF_ConfigGetAttribute(config=CF,value=jchunk3d(n),default=0,label ='jchunk3d:',rc=rc) - call ESMF_ConfigGetAttribute(config=CF,value=kchunk3d(n),default=0,label ='kchunk3d:',rc=rc) - - ! zstandard compression flag - call ESMF_ConfigGetAttribute(config=CF,value=zstandard_level(n),default=0,label ='zstandard_level:',rc=rc) - if (zstandard_level(n) < 0) zstandard_level(n)=0 - - ! zlib compression flag - call ESMF_ConfigGetAttribute(config=CF,value=ideflate(n),default=0,label ='ideflate:',rc=rc) - if (ideflate(n) < 0) ideflate(n)=0 - - if (ideflate(n) > 0 .and. zstandard_level(n) > 0) then - write(0,*)"wrt_initialize_p1: zlib and zstd compression cannot be both enabled at the same time" - call ESMF_LogWrite("wrt_initialize_p1: zlib and zstd compression cannot be both enabled at the same time",ESMF_LOGMSG_ERROR,rc=RC) - call ESMF_Finalize(endflag=ESMF_END_ABORT) - end if - - ! quantize_mode and quantize_nsd - call ESMF_ConfigGetAttribute(config=CF,value=quantize_mode(n),default='quantize_bitgroom',label='quantize_mode:',rc=rc) - call ESMF_ConfigGetAttribute(config=CF,value=quantize_nsd(n),default=0,label='quantize_nsd:',rc=rc) - - if (.NOT. (trim(quantize_mode(n))=='quantize_bitgroom' & - .OR. trim(quantize_mode(n))=='quantize_granularbr' & - .OR. 
trim(quantize_mode(n))=='quantize_bitround') ) then - write(0,*)"wrt_initialize_p1: unknown quantize_mode ", trim(quantize_mode(n)) - call ESMF_LogWrite("wrt_initialize_p1: wrt_initialize_p1: unknown quantize_mode "//trim(quantize_mode(n)),ESMF_LOGMSG_ERROR,rc=RC) - call ESMF_Finalize(endflag=ESMF_END_ABORT) - end if - - if (lprnt) then - print *,'ideflate=',ideflate(n) - print *,'quantize_mode=',trim(quantize_mode(n)),' quantize_nsd=',quantize_nsd(n) - print *,'zstandard_level=',zstandard_level(n) - end if - - if (cf_output_grid /= cf) then - ! destroy the temporary config object created for nest domains - call ESMF_ConfigDestroy(config=cf_output_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif + ! quantize_mode and quantize_nsd + call ESMF_ConfigGetAttribute(config=CF,value=quantize_mode(n),default='quantize_bitgroom',label='quantize_mode:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=quantize_nsd(n),default=0,label='quantize_nsd:',rc=rc) - call ESMF_ConfigGetAttribute(config=CF, value=history_file_on_native_grid, default=.false., & - label='history_file_on_native_grid:', rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - if (n == 1 .and. top_parent_is_global .and. 
history_file_on_native_grid) then - do tl=1,6 - decomptile(1,tl) = 1 - decomptile(2,tl) = jidx - decompflagPTile(:,tl) = (/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/) - enddo - call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & - name="gridfile", value=gridfile, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - wrtGrid_cubed_sphere = ESMF_GridCreateMosaic(filename="INPUT/"//trim(gridfile), & - regDecompPTile=decomptile,tileFilePath="INPUT/", & - decompflagPTile=decompflagPTile, & - staggerlocList=(/ESMF_STAGGERLOC_CENTER, ESMF_STAGGERLOC_CORNER/), & - name='wrt_grid', rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - create_wrtGrid_cubed_sphere = .false. - endif - - if ( trim(output_grid(n)) == 'cubed_sphere_grid' ) then - !*** Create cubed sphere grid from file - if (top_parent_is_global .and. n == 1) then - do tl=1,6 - decomptile(1,tl) = 1 - decomptile(2,tl) = jidx - decompflagPTile(:,tl) = (/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/) - enddo - call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & - name="gridfile", value=gridfile, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - wrtGrid(n) = ESMF_GridCreateMosaic(filename="INPUT/"//trim(gridfile), & - regDecompPTile=decomptile,tileFilePath="INPUT/", & - decompflagPTile=decompflagPTile, & - staggerlocList=(/ESMF_STAGGERLOC_CENTER, ESMF_STAGGERLOC_CORNER/), & - name='wrt_grid', rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - else - if (top_parent_is_global) then - write(gridfile,'(A,I2.2,A,I1,A)') 'grid.nest', n, '.tile', n+5, '.nc' - else - if (n == 1) then - gridfile='grid.tile7.halo0.nc' ! 
regional top-level parent - else - write(gridfile,'(A,I2.2,A,I1,A)') 'grid.nest', n, '.tile', n, '.nc' - endif - end if - regDecomp(1) = 1 - regDecomp(2) = ntasks - allocate(petMap(ntasks)) - do i=1, ntasks - petMap(i) = i-1 - enddo - delayout = ESMF_DELayoutCreate(petMap=petMap, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - ! create the nest Grid by reading it from file but use DELayout - call ESMF_LogWrite("wrtComp: gridfile:"//trim(gridfile),ESMF_LOGMSG_INFO,rc=rc) - wrtGrid(n) = ESMF_GridCreate(filename="INPUT/"//trim(gridfile), & - fileformat=ESMF_FILEFORMAT_GRIDSPEC, regDecomp=regDecomp, & - decompflag=(/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/), & - delayout=delayout, isSphere=.false., indexflag=ESMF_INDEX_DELOCAL, & - rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (.NOT. (trim(quantize_mode(n))=='quantize_bitgroom' & + .OR. trim(quantize_mode(n))=='quantize_granularbr' & + .OR. trim(quantize_mode(n))=='quantize_bitround') ) then + write(0,*)"wrt_initialize_p1: unknown quantize_mode ", trim(quantize_mode(n)) + call ESMF_LogWrite("wrt_initialize_p1: wrt_initialize_p1: unknown quantize_mode "//trim(quantize_mode(n)),ESMF_LOGMSG_ERROR,rc=RC) + call ESMF_Finalize(endflag=ESMF_END_ABORT) + end if + + if (lprnt) then + print *,'ideflate=',ideflate(n) + print *,'quantize_mode=',trim(quantize_mode(n)),' quantize_nsd=',quantize_nsd(n) + print *,'zstandard_level=',zstandard_level(n) + end if + + if (cf_output_grid /= cf) then + ! 
destroy the temporary config object created for nest domains + call ESMF_ConfigDestroy(config=cf_output_grid, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + endif - deallocate(petMap) - endif + call ESMF_ConfigGetAttribute(config=CF, value=history_file_on_native_grid, default=.false., & + label='history_file_on_native_grid:', rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridGet(wrtGrid(n), tile=1, staggerloc=ESMF_STAGGERLOC_CENTER, minIndex=minIndex, maxIndex=maxIndex, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (n == 1 .and. top_parent_is_global .and. history_file_on_native_grid) then + do tl=1,6 + decomptile(1,tl) = 1 + decomptile(2,tl) = jidx + decompflagPTile(:,tl) = (/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/) + enddo + call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & + name="gridfile", value=gridfile, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + wrtGrid_cubed_sphere = ESMF_GridCreateMosaic(filename="INPUT/"//trim(gridfile), & + regDecompPTile=decomptile,tileFilePath="INPUT/", & + decompflagPTile=decompflagPTile, & + staggerlocList=(/ESMF_STAGGERLOC_CENTER, ESMF_STAGGERLOC_CORNER/), & + name='wrt_grid', rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + create_wrtGrid_cubed_sphere = .false. + endif - wrt_int_state%out_grid_info(n)%i_start = 1 - wrt_int_state%out_grid_info(n)%i_end = maxIndex(1) - minIndex(1) + 1 - wrt_int_state%out_grid_info(n)%j_start = 1 - wrt_int_state%out_grid_info(n)%j_end = maxIndex(2) - minIndex(2) + 1 + if ( trim(output_grid(n)) == 'cubed_sphere_grid' ) then + !*** Create cubed sphere grid from file + if (top_parent_is_global .and. 
n == 1) then + do tl=1,6 + decomptile(1,tl) = 1 + decomptile(2,tl) = jidx + decompflagPTile(:,tl) = (/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/) + enddo + call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & + name="gridfile", value=gridfile, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + wrtGrid(n) = ESMF_GridCreateMosaic(filename="INPUT/"//trim(gridfile), & + regDecompPTile=decomptile,tileFilePath="INPUT/", & + decompflagPTile=decompflagPTile, & + staggerlocList=(/ESMF_STAGGERLOC_CENTER, ESMF_STAGGERLOC_CORNER/), & + name='wrt_grid', rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + else + if (top_parent_is_global) then + write(gridfile,'(A,I2.2,A,I1,A)') 'grid.nest', n, '.tile', n+5, '.nc' + else + if (n == 1) then + gridfile='grid.tile7.halo0.nc' ! regional top-level parent + else + write(gridfile,'(A,I2.2,A,I1,A)') 'grid.nest', n, '.tile', n, '.nc' + endif + end if + regDecomp(1) = 1 + regDecomp(2) = ntasks + allocate(petMap(ntasks)) + do i=1, ntasks + petMap(i) = i-1 + enddo + delayout = ESMF_DELayoutCreate(petMap=petMap, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! create the nest Grid by reading it from file but use DELayout + call ESMF_LogWrite("wrtComp: gridfile:"//trim(gridfile),ESMF_LOGMSG_INFO,rc=rc) + wrtGrid(n) = ESMF_GridCreate(filename="INPUT/"//trim(gridfile), & + fileformat=ESMF_FILEFORMAT_GRIDSPEC, regDecomp=regDecomp, & + decompflag=(/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/), & + delayout=delayout, isSphere=.false., indexflag=ESMF_INDEX_DELOCAL, & + rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + deallocate(petMap) + endif - else ! 
non 'cubed_sphere_grid' - if ( trim(output_grid(n)) == 'gaussian_grid') then + call ESMF_GridGet(wrtGrid(n), tile=1, staggerloc=ESMF_STAGGERLOC_CENTER, minIndex=minIndex, maxIndex=maxIndex, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - wrtGrid(n) = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), & - maxIndex=(/imo(n),jmo(n)/), regDecomp=(/itasks,jtasks/), & - indexflag=ESMF_INDEX_GLOBAL, & - name='wrt_grid',rc=rc) + wrt_int_state%out_grid_info(n)%i_start = 1 + wrt_int_state%out_grid_info(n)%i_end = maxIndex(1) - minIndex(1) + 1 + wrt_int_state%out_grid_info(n)%j_start = 1 + wrt_int_state%out_grid_info(n)%j_end = maxIndex(2) - minIndex(2) + 1 - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + else ! non 'cubed_sphere_grid' + if ( trim(output_grid(n)) == 'gaussian_grid') then - call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + wrtGrid(n) = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), & + maxIndex=(/imo(n),jmo(n)/), regDecomp=(/itasks,jtasks/), & + indexflag=ESMF_INDEX_GLOBAL, & + name='wrt_grid',rc=rc) - call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(slat(jmo(n)), lat(jmo(n)), lon(imo(n))) - call splat(4, jmo(n), slat) - if(write_nsflip) 
then - do j=1,jmo(n) - lat(j) = asin(slat(j)) * radi - enddo - else - do j=1,jmo(n) - lat(jmo(n)-j+1) = asin(slat(j)) * radi - enddo - endif - do j=1,imo(n) - lon(j) = 360.d0/real(imo(n),8) *real(j-1,8) - enddo - do j=lbound(latPtr,2),ubound(latPtr,2) - do i=lbound(lonPtr,1),ubound(lonPtr,1) - lonPtr(i,j) = 360.d0/real(imo(n),8) * real(i-1,8) - latPtr(i,j) = lat(j) - enddo - enddo - lon1(n) = lon(1) - lon2(n) = lon(imo(n)) - lat1(n) = lat(1) - lat2(n) = lat(jmo(n)) - dlon(n) = 360.d0/real(imo(n),8) - dlat(n) = 180.d0/real(jmo(n),8) + call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - deallocate(slat, lat, lon) + call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - else if ( trim(output_grid(n)) == 'global_latlon') then - wrtGrid(n) = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), & + allocate(slat(jmo(n)), lat(jmo(n)), lon(imo(n))) + call splat(4, jmo(n), slat) + if(write_nsflip) then + do j=1,jmo(n) + lat(j) = asin(slat(j)) * radi + enddo + else + do j=1,jmo(n) + lat(jmo(n)-j+1) = asin(slat(j)) * radi + enddo + endif + do j=1,imo(n) + lon(j) = 360.d0/real(imo(n),8) *real(j-1,8) + enddo + do j=lbound(latPtr,2),ubound(latPtr,2) + do i=lbound(lonPtr,1),ubound(lonPtr,1) + lonPtr(i,j) = 360.d0/real(imo(n),8) * real(i-1,8) + latPtr(i,j) = lat(j) + enddo + enddo + lon1(n) = lon(1) + lon2(n) = lon(imo(n)) + lat1(n) = lat(1) + lat2(n) = lat(jmo(n)) + dlon(n) = 360.d0/real(imo(n),8) + dlat(n) = 180.d0/real(jmo(n),8) + + deallocate(slat, lat, lon) + + else if ( trim(output_grid(n)) == 'global_latlon') then + wrtGrid(n) = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), & + maxIndex=(/imo(n),jmo(n)/), regDecomp=(/itasks,jtasks/), & + indexflag=ESMF_INDEX_GLOBAL, & + name='wrt_grid',rc=rc) + + if (ESMF_LogFoundError(rcToCheck=rc, 
msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + allocate(lat(jmo(n)), lon(imo(n))) + if (mod(jmo(n),2) == 0) then + ! if jmo even, lats do not include poles and equator + delat = 180.d0/real(jmo(n),8) + if(write_nsflip) then + do j=1,jmo(n) + lat(j) = 90.d0 - 0.5*delat - real(j-1,8)*delat + enddo + else + do j=1,jmo(n) + lat(j) = -90.d0 + 0.5*delat + real(j-1,8)*delat + enddo + endif + else + ! if jmo odd, lats include poles and equator + delat = 180.d0/real(jmo(n)-1,8) + if(write_nsflip) then + do j=1,jmo(n) + lat(j) = 90.d0 - real(j-1,8)*delat + enddo + else + do j=1,jmo(n) + lat(j) = -90.d0 + real(j-1,8)*delat + enddo + endif + endif + delon = 360.d0/real(imo(n),8) + do i=1,imo(n) + lon(i) = real(i-1,8)*delon + enddo + do j=lbound(latPtr,2),ubound(latPtr,2) + do i=lbound(lonPtr,1),ubound(lonPtr,1) + lonPtr(i,j) = lon(i) + latPtr(i,j) = lat(j) + enddo + enddo + lon1(n) = lon(1) + lon2(n) = lon(imo(n)) + lat1(n) = lat(1) + lat2(n) = lat(jmo(n)) + dlon(n) = delon + dlat(n) = delat + + deallocate(lat, lon) + + else if ( trim(output_grid(n)) == 'regional_latlon' .or. & + trim(output_grid(n)) == 'regional_latlon_moving' .or. & + trim(output_grid(n)) == 'rotated_latlon' .or. & + trim(output_grid(n)) == 'rotated_latlon_moving' .or. 
& + trim(output_grid(n)) == 'lambert_conformal' ) then + + wrtGrid(n) = ESMF_GridCreateNoPeriDim(minIndex=(/1,1/), & maxIndex=(/imo(n),jmo(n)/), regDecomp=(/itasks,jtasks/), & indexflag=ESMF_INDEX_GLOBAL, & name='wrt_grid',rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - allocate(lat(jmo(n)), lon(imo(n))) - if (mod(jmo(n),2) == 0) then - ! if jmo even, lats do not include poles and equator - delat = 180.d0/real(jmo(n),8) - if(write_nsflip) then - do j=1,jmo(n) - lat(j) = 90.d0 - 0.5*delat - real(j-1,8)*delat - enddo - else - do j=1,jmo(n) - lat(j) = -90.d0 + 0.5*delat + real(j-1,8)*delat - enddo - endif - else - ! if jmo odd, lats include poles and equator - delat = 180.d0/real(jmo(n)-1,8) - if(write_nsflip) then - do j=1,jmo(n) - lat(j) = 90.d0 - real(j-1,8)*delat - enddo - else - do j=1,jmo(n) - lat(j) = -90.d0 + real(j-1,8)*delat - enddo - endif - endif - delon = 360.d0/real(imo(n),8) - do i=1,imo(n) - lon(i) = real(i-1,8)*delon - enddo - do j=lbound(latPtr,2),ubound(latPtr,2) - do i=lbound(lonPtr,1),ubound(lonPtr,1) - lonPtr(i,j) = lon(i) - latPtr(i,j) = lat(j) - enddo - enddo - lon1(n) = lon(1) - lon2(n) = lon(imo(n)) - lat1(n) = lat(1) - lat2(n) = lat(jmo(n)) - dlon(n) = delon - dlat(n) = delat - - deallocate(lat, lon) - - else if ( trim(output_grid(n)) == 'regional_latlon' .or. & - trim(output_grid(n)) == 'regional_latlon_moving' .or. 
& - trim(output_grid(n)) == 'rotated_latlon' .or. & - trim(output_grid(n)) == 'rotated_latlon_moving' .or. & - trim(output_grid(n)) == 'lambert_conformal' ) then - - wrtGrid(n) = ESMF_GridCreateNoPeriDim(minIndex=(/1,1/), & - maxIndex=(/imo(n),jmo(n)/), regDecomp=(/itasks,jtasks/), & - indexflag=ESMF_INDEX_GLOBAL, & - name='wrt_grid',rc=rc) - - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if ( trim(output_grid(n)) == 'regional_latlon' ) then - do j=lbound(lonPtr,2),ubound(lonPtr,2) - do i=lbound(lonPtr,1),ubound(lonPtr,1) - lonPtr(i,j) = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (i-1) - latPtr(i,j) = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (j-1) - enddo - enddo - else if ( trim(output_grid(n)) == 'regional_latlon_moving' ) then - ! 
Do not compute lonPtr, latPtr here. Will be done in the run phase - else if ( trim(output_grid(n)) == 'rotated_latlon' ) then - do j=lbound(lonPtr,2),ubound(lonPtr,2) - do i=lbound(lonPtr,1),ubound(lonPtr,1) - rot_lon = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (i-1) - rot_lat = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (j-1) - call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) - if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 - lonPtr(i,j) = geo_lon - latPtr(i,j) = geo_lat - enddo - enddo - rot_lon = lon1(n) - rot_lat = lat1(n) - call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) - if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 - wrt_int_state%out_grid_info(n)%lonstart = geo_lon - wrt_int_state%out_grid_info(n)%latstart = geo_lat - - rot_lon = lon2(n) - rot_lat = lat1(n) - call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) - if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 - wrt_int_state%out_grid_info(n)%lonse = geo_lon - wrt_int_state%out_grid_info(n)%latse = geo_lat - - rot_lon = lon1(n) - rot_lat = lat2(n) - call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) - if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 - wrt_int_state%out_grid_info(n)%lonnw = geo_lon - wrt_int_state%out_grid_info(n)%latnw = geo_lat - - rot_lon = lon2(n) - rot_lat = lat2(n) - call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) - if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 - wrt_int_state%out_grid_info(n)%lonlast = geo_lon - wrt_int_state%out_grid_info(n)%latlast = geo_lat - else if ( trim(output_grid(n)) == 'rotated_latlon_moving' ) then - ! Do not compute lonPtr, latPtr here. 
Will be done in the run phase - else if ( trim(output_grid(n)) == 'lambert_conformal' ) then - lon1_r8 = dble(lon1(n)) - lat1_r8 = dble(lat1(n)) - call lambert(dble(stdlat1(n)),dble(stdlat2(n)),dble(cen_lat(n)),dble(cen_lon(n)), & - lon1_r8,lat1_r8,x1,y1, 1) - do j=lbound(lonPtr,2),ubound(lonPtr,2) - do i=lbound(lonPtr,1),ubound(lonPtr,1) - x = x1 + dx(n) * (i-1) - y = y1 + dy(n) * (j-1) - call lambert(dble(stdlat1(n)),dble(stdlat2(n)),dble(cen_lat(n)),dble(cen_lon(n)), & - geo_lon,geo_lat,x,y,-1) - if (geo_lon <0.0) geo_lon = geo_lon + 360.0 - lonPtr(i,j) = geo_lon - latPtr(i,j) = geo_lat - enddo - enddo - endif + if ( trim(output_grid(n)) == 'regional_latlon' ) then + do j=lbound(lonPtr,2),ubound(lonPtr,2) + do i=lbound(lonPtr,1),ubound(lonPtr,1) + lonPtr(i,j) = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (i-1) + latPtr(i,j) = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (j-1) + enddo + enddo + else if ( trim(output_grid(n)) == 'regional_latlon_moving' ) then + ! Do not compute lonPtr, latPtr here. 
Will be done in the run phase + else if ( trim(output_grid(n)) == 'rotated_latlon' ) then + do j=lbound(lonPtr,2),ubound(lonPtr,2) + do i=lbound(lonPtr,1),ubound(lonPtr,1) + rot_lon = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (i-1) + rot_lat = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (j-1) + call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) + if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 + lonPtr(i,j) = geo_lon + latPtr(i,j) = geo_lat + enddo + enddo + rot_lon = lon1(n) + rot_lat = lat1(n) + call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) + if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 + wrt_int_state%out_grid_info(n)%lonstart = geo_lon + wrt_int_state%out_grid_info(n)%latstart = geo_lat + + rot_lon = lon2(n) + rot_lat = lat1(n) + call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) + if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 + wrt_int_state%out_grid_info(n)%lonse = geo_lon + wrt_int_state%out_grid_info(n)%latse = geo_lat + + rot_lon = lon1(n) + rot_lat = lat2(n) + call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) + if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 + wrt_int_state%out_grid_info(n)%lonnw = geo_lon + wrt_int_state%out_grid_info(n)%latnw = geo_lat + + rot_lon = lon2(n) + rot_lat = lat2(n) + call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) + if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 + wrt_int_state%out_grid_info(n)%lonlast = geo_lon + wrt_int_state%out_grid_info(n)%latlast = geo_lat + else if ( trim(output_grid(n)) == 'rotated_latlon_moving' ) then + ! Do not compute lonPtr, latPtr here. 
Will be done in the run phase + else if ( trim(output_grid(n)) == 'lambert_conformal' ) then + lon1_r8 = dble(lon1(n)) + lat1_r8 = dble(lat1(n)) + call lambert(dble(stdlat1(n)),dble(stdlat2(n)),dble(cen_lat(n)),dble(cen_lon(n)), & + lon1_r8,lat1_r8,x1,y1, 1) + do j=lbound(lonPtr,2),ubound(lonPtr,2) + do i=lbound(lonPtr,1),ubound(lonPtr,1) + x = x1 + dx(n) * (i-1) + y = y1 + dy(n) * (j-1) + call lambert(dble(stdlat1(n)),dble(stdlat2(n)),dble(cen_lat(n)),dble(cen_lon(n)), & + geo_lon,geo_lat,x,y,-1) + if (geo_lon <0.0) geo_lon = geo_lon + 360.0 + lonPtr(i,j) = geo_lon + latPtr(i,j) = geo_lat + enddo + enddo + endif - else + else - write(0,*)"wrt_initialize_p1: Unknown output_grid ", trim(output_grid(n)) - call ESMF_LogWrite("wrt_initialize_p1: Unknown output_grid "//trim(output_grid(n)),ESMF_LOGMSG_ERROR,rc=RC) - call ESMF_Finalize(endflag=ESMF_END_ABORT) + write(0,*)"wrt_initialize_p1: Unknown output_grid ", trim(output_grid(n)) + call ESMF_LogWrite("wrt_initialize_p1: Unknown output_grid "//trim(output_grid(n)),ESMF_LOGMSG_ERROR,rc=RC) + call ESMF_Finalize(endflag=ESMF_END_ABORT) - endif + endif - wrt_int_state%out_grid_info(n)%i_start = lbound(lonPtr,1) - wrt_int_state%out_grid_info(n)%i_end = ubound(lonPtr,1) - wrt_int_state%out_grid_info(n)%j_start = lbound(latPtr,2) - wrt_int_state%out_grid_info(n)%j_end = ubound(latPtr,2) - - allocate( wrt_int_state%out_grid_info(n)%i_start_wrtgrp(wrt_int_state%petcount) ) - allocate( wrt_int_state%out_grid_info(n)%i_end_wrtgrp (wrt_int_state%petcount) ) - allocate( wrt_int_state%out_grid_info(n)%j_start_wrtgrp(wrt_int_state%petcount) ) - allocate( wrt_int_state%out_grid_info(n)%j_end_wrtgrp (wrt_int_state%petcount) ) - - call mpi_allgather(wrt_int_state%out_grid_info(n)%i_start, 1, MPI_INTEGER, & - wrt_int_state%out_grid_info(n)%i_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc) - call mpi_allgather(wrt_int_state%out_grid_info(n)%i_end, 1, MPI_INTEGER, & - wrt_int_state%out_grid_info(n)%i_end_wrtgrp, 1, MPI_INTEGER, 
wrt_mpi_comm, rc) - call mpi_allgather(wrt_int_state%out_grid_info(n)%j_start, 1, MPI_INTEGER, & - wrt_int_state%out_grid_info(n)%j_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc) - call mpi_allgather(wrt_int_state%out_grid_info(n)%j_end, 1, MPI_INTEGER, & - wrt_int_state%out_grid_info(n)%j_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc) - - allocate( wrt_int_state%out_grid_info(n)%lonPtr(wrt_int_state%out_grid_info(n)%i_start:wrt_int_state%out_grid_info(n)%i_end, & - wrt_int_state%out_grid_info(n)%j_start:wrt_int_state%out_grid_info(n)%j_end) ) - allocate( wrt_int_state%out_grid_info(n)%latPtr(wrt_int_state%out_grid_info(n)%i_start:wrt_int_state%out_grid_info(n)%i_end, & - wrt_int_state%out_grid_info(n)%j_start:wrt_int_state%out_grid_info(n)%j_end) ) - - if ( trim(output_grid(n)) /= 'regional_latlon_moving' .and. trim(output_grid(n)) /= 'rotated_latlon_moving' ) then - do j=wrt_int_state%out_grid_info(n)%j_start, wrt_int_state%out_grid_info(n)%j_end - do i=wrt_int_state%out_grid_info(n)%i_start, wrt_int_state%out_grid_info(n)%i_end - wrt_int_state%out_grid_info(n)%latPtr(i,j) = latPtr(i,j) - wrt_int_state%out_grid_info(n)%lonPtr(i,j) = lonPtr(i,j) - enddo - enddo - endif + wrt_int_state%out_grid_info(n)%i_start = lbound(lonPtr,1) + wrt_int_state%out_grid_info(n)%i_end = ubound(lonPtr,1) + wrt_int_state%out_grid_info(n)%j_start = lbound(latPtr,2) + wrt_int_state%out_grid_info(n)%j_end = ubound(latPtr,2) + + allocate( wrt_int_state%out_grid_info(n)%i_start_wrtgrp(wrt_int_state%petcount) ) + allocate( wrt_int_state%out_grid_info(n)%i_end_wrtgrp (wrt_int_state%petcount) ) + allocate( wrt_int_state%out_grid_info(n)%j_start_wrtgrp(wrt_int_state%petcount) ) + allocate( wrt_int_state%out_grid_info(n)%j_end_wrtgrp (wrt_int_state%petcount) ) + + call mpi_allgather(wrt_int_state%out_grid_info(n)%i_start, 1, MPI_INTEGER, & + wrt_int_state%out_grid_info(n)%i_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc) + call mpi_allgather(wrt_int_state%out_grid_info(n)%i_end, 1, MPI_INTEGER, 
& + wrt_int_state%out_grid_info(n)%i_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc) + call mpi_allgather(wrt_int_state%out_grid_info(n)%j_start, 1, MPI_INTEGER, & + wrt_int_state%out_grid_info(n)%j_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc) + call mpi_allgather(wrt_int_state%out_grid_info(n)%j_end, 1, MPI_INTEGER, & + wrt_int_state%out_grid_info(n)%j_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc) + + allocate( wrt_int_state%out_grid_info(n)%lonPtr(wrt_int_state%out_grid_info(n)%i_start:wrt_int_state%out_grid_info(n)%i_end, & + wrt_int_state%out_grid_info(n)%j_start:wrt_int_state%out_grid_info(n)%j_end) ) + allocate( wrt_int_state%out_grid_info(n)%latPtr(wrt_int_state%out_grid_info(n)%i_start:wrt_int_state%out_grid_info(n)%i_end, & + wrt_int_state%out_grid_info(n)%j_start:wrt_int_state%out_grid_info(n)%j_end) ) + + if ( trim(output_grid(n)) /= 'regional_latlon_moving' .and. trim(output_grid(n)) /= 'rotated_latlon_moving' ) then + do j=wrt_int_state%out_grid_info(n)%j_start, wrt_int_state%out_grid_info(n)%j_end + do i=wrt_int_state%out_grid_info(n)%i_start, wrt_int_state%out_grid_info(n)%i_end + wrt_int_state%out_grid_info(n)%latPtr(i,j) = latPtr(i,j) + wrt_int_state%out_grid_info(n)%lonPtr(i,j) = lonPtr(i,j) + enddo + enddo + endif - wrt_int_state%out_grid_info(n)%im = imo(n) - wrt_int_state%out_grid_info(n)%jm = jmo(n) + wrt_int_state%out_grid_info(n)%im = imo(n) + wrt_int_state%out_grid_info(n)%jm = jmo(n) - end if ! non 'cubed_sphere_grid' - end do ! n = 1, ngrids + end if ! non 'cubed_sphere_grid' + end do ! n = 1, ngrids ! !----------------------------------------------------------------------- !*** get write grid component initial time from clock !----------------------------------------------------------------------- ! 
- call ESMF_ClockGet(clock =CLOCK & !<-- The ESMF Clock - ,startTime=wrt_int_state%IO_BASETIME & !<-- The Clock's starting time - ,rc =RC) + call ESMF_ClockGet(clock =CLOCK & !<-- The ESMF Clock + ,startTime=wrt_int_state%IO_BASETIME & !<-- The Clock's starting time + ,rc =RC) - call ESMF_TimeGet(time=wrt_int_state%IO_BASETIME,yy=idate(1),mm=idate(2),dd=idate(3), & - h=idate(4), m=idate(5), s=idate(6),rc=rc) + call ESMF_TimeGet(time=wrt_int_state%IO_BASETIME,yy=idate(1),mm=idate(2),dd=idate(3), & + h=idate(4), m=idate(5), s=idate(6),rc=rc) ! if (lprnt) write(0,*) 'in wrt initial, io_baseline time=',idate,'rc=',rc - idate(7) = 1 - start_time = idate - wrt_int_state%idate = idate - wrt_int_state%fdate = idate + idate(7) = 1 + start_time = idate + wrt_int_state%idate = idate + wrt_int_state%fdate = idate ! update IO-BASETIME and idate on write grid comp when IAU is enabled - if (iau_offset > 0) then - call ESMF_TimeIntervalSet(IAU_offsetTI, h=iau_offset, rc=rc) - wrt_int_state%IO_BASETIME = wrt_int_state%IO_BASETIME + IAU_offsetTI - call ESMF_TimeGet(time=wrt_int_state%IO_BASETIME,yy=idate(1),mm=idate(2),dd=idate(3), & - h=idate(4), m=idate(5), s=idate(6),rc=rc) - wrt_int_state%idate = idate - change_wrtidate = .true. - if (lprnt) print *,'in wrt initial, with iau, io_baseline time=',idate,'rc=',rc - endif + if (iau_offset > 0) then + call ESMF_TimeIntervalSet(IAU_offsetTI, h=iau_offset, rc=rc) + wrt_int_state%IO_BASETIME = wrt_int_state%IO_BASETIME + IAU_offsetTI + call ESMF_TimeGet(time=wrt_int_state%IO_BASETIME,yy=idate(1),mm=idate(2),dd=idate(3), & + h=idate(4), m=idate(5), s=idate(6),rc=rc) + wrt_int_state%idate = idate + change_wrtidate = .true. + if (lprnt) print *,'in wrt initial, with iau, io_baseline time=',idate,'rc=',rc + endif ! !--- Look at the incoming FieldBundles in the imp_state_write, and mirror them as 'output_' bundles ! 
- call ESMF_StateGet(imp_state_write, itemCount=FBCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_StateGet(imp_state_write, itemCount=FBCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! if (lprnt) write(0,*)'wrt_initialize_p1: FBCount=',FBCount, ' from imp_state_write' + ! if (lprnt) write(0,*)'wrt_initialize_p1: FBCount=',FBCount, ' from imp_state_write' - allocate(fcstItemNameList(FBCount), fcstItemTypeList(FBCount)) - allocate(outfilename(2000,FBCount)) - outfilename = '' + allocate(fcstItemNameList(FBCount), fcstItemTypeList(FBCount)) + allocate(outfilename(2000,FBCount)) + outfilename = '' - call ESMF_StateGet(imp_state_write, itemNameList=fcstItemNameList, & - itemTypeList=fcstItemTypeList, & - !itemorderflag=ESMF_ITEMORDER_ADDORDER, & - rc=rc) + call ESMF_StateGet(imp_state_write, itemNameList=fcstItemNameList, & + itemTypeList=fcstItemTypeList, & + !itemorderflag=ESMF_ITEMORDER_ADDORDER, & + rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return !loop over all items in the imp_state_write and collect all FieldBundles - do i=1, FBCount + do i=1, FBCount - if (fcstItemTypeList(i) == ESMF_STATEITEM_FIELDBUNDLE) then + if (fcstItemTypeList(i) == ESMF_STATEITEM_FIELDBUNDLE) then - call ESMF_StateGet(imp_state_write, itemName=fcstItemNameList(i), & - fieldbundle=fcstFB, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_StateGet(imp_state_write, itemName=fcstItemNameList(i), & + fieldbundle=fcstFB, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeGet(fcstFB, convention="NetCDF", purpose="FV3", & - 
name="grid_id", value=grid_id, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_AttributeGet(fcstFB, convention="NetCDF", purpose="FV3", & + name="grid_id", value=grid_id, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeGet(fcstFB, convention="NetCDF", purpose="FV3-nooutput", & - name="frestart", valueList=frestart, isPresent=isPresent, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if (isPresent) then - ! if (lprnt) write(0,*)'wrt_initialize_p1: frestart(1:10) = ',frestart(1:10) - call ESMF_AttributeRemove(fcstFB, convention="NetCDF", purpose="FV3-nooutput", name="frestart", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif + call ESMF_AttributeGet(fcstFB, convention="NetCDF", purpose="FV3-nooutput", & + name="frestart", valueList=frestart, isPresent=isPresent, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (isPresent) then + ! if (lprnt) write(0,*)'wrt_initialize_p1: frestart(1:10) = ',frestart(1:10) + call ESMF_AttributeRemove(fcstFB, convention="NetCDF", purpose="FV3-nooutput", name="frestart", rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + endif !--- get grid dim count - ! call ESMF_GridGet(wrtGrid(grid_id), dimCount=gridDimCount, rc=rc) - ! if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! call ESMF_GridGet(wrtGrid(grid_id), dimCount=gridDimCount, rc=rc) + ! if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! 
create a mirrored 'output_' FieldBundle and add it to importState - fieldbundle = ESMF_FieldBundleCreate(name="output_"//trim(fcstItemNameList(i)), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + fieldbundle = ESMF_FieldBundleCreate(name="output_"//trim(fcstItemNameList(i)), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_StateAdd(imp_state_write, (/fieldbundle/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_StateAdd(imp_state_write, (/fieldbundle/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! copy the fcstFB Attributes to the 'output_' FieldBundle - call ESMF_AttributeCopy(fcstFB, fieldbundle, attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_AttributeCopy(fcstFB, fieldbundle, attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! grids in fcstFB for which 'is_moving' is .true. must provide a first level mirror for the Redist() target - if (is_moving(grid_id)) then + if (is_moving(grid_id)) then ! 
create a mirrored 'mirror_' FieldBundle and add it to importState - mirrorFB = ESMF_FieldBundleCreate(name="mirror_"//trim(fcstItemNameList(i)), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + mirrorFB = ESMF_FieldBundleCreate(name="mirror_"//trim(fcstItemNameList(i)), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_StateAdd(imp_state_write, (/mirrorFB/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_StateAdd(imp_state_write, (/mirrorFB/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! copy the fcstFB Attributes to the 'mirror_' FieldBundle - call ESMF_AttributeCopy(fcstFB, mirrorFB, attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! copy the fcstFB Attributes to the 'mirror_' FieldBundle + call ESMF_AttributeCopy(fcstFB, mirrorFB, attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif + endif ! 
deal with all of the Fields inside this fcstFB - call ESMF_FieldBundleGet(fcstFB, fieldCount=fieldCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_FieldBundleGet(fcstFB, fieldCount=fieldCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if (fieldCount > 0) then + if (fieldCount > 0) then - call ESMF_FieldBundleGet(fcstFB, grid=fcstGrid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_FieldBundleGet(fcstFB, grid=fcstGrid, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(fcstField(fieldCount)) - call ESMF_FieldBundleGet(fcstFB, fieldList=fcstField, & - itemorderflag=ESMF_ITEMORDER_ADDORDER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + allocate(fcstField(fieldCount)) + call ESMF_FieldBundleGet(fcstFB, fieldList=fcstField, & + itemorderflag=ESMF_ITEMORDER_ADDORDER, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if (fcstItemNameList(i)(1:18) == 'cubed_sphere_grid_') then + if (fcstItemNameList(i)(1:18) == 'cubed_sphere_grid_') then - if (create_wrtGrid_cubed_sphere) then - ! create a grid from fcstGrid on forecast grid comp, by rebalancing distgrid to the local PETs - ! access the acceptor DistGrid - call ESMF_GridGet(fcstGrid, distgrid=acceptorDG, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! 
rebalance the acceptor DistGrid across the local PETs - newAcceptorDG = ESMF_DistGridCreate(acceptorDG, balanceflag=.true., rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - wrtGrid_cubed_sphere = ESMF_GridCreate(fcstGrid, newAcceptorDG, copyAttributes=.true., rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (create_wrtGrid_cubed_sphere) then + ! create a grid from fcstGrid on forecast grid comp, by rebalancing distgrid to the local PETs + ! access the acceptor DistGrid + call ESMF_GridGet(fcstGrid, distgrid=acceptorDG, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! rebalance the acceptor DistGrid across the local PETs + newAcceptorDG = ESMF_DistGridCreate(acceptorDG, balanceflag=.true., rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + wrtGrid_cubed_sphere = ESMF_GridCreate(fcstGrid, newAcceptorDG, copyAttributes=.true., rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - create_wrtGrid_cubed_sphere = .false. - end if + create_wrtGrid_cubed_sphere = .false. + end if - actualWrtGrid = wrtGrid_cubed_sphere - call ESMF_AttributeSet(fieldbundle, convention="NetCDF", purpose="FV3-nooutput", name="output_grid", value="cubed_sphere_grid", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + actualWrtGrid = wrtGrid_cubed_sphere + call ESMF_AttributeSet(fieldbundle, convention="NetCDF", purpose="FV3-nooutput", name="output_grid", value="cubed_sphere_grid", rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - else if (fcstItemNameList(i)(1:8) == 'restart_') then - ! 
If this is a 'restart' bundle the actual grid that the output field ('field_work' below) is created on - ! must be the same grid as forecast grid, not the output grid for this grid_id (wrtGrid(grid_id)). - ! For 'cubed_sphere_grid' these are the same, but for all other output grids (like Lambert) they are not. + else if (fcstItemNameList(i)(1:8) == 'restart_') then + ! If this is a 'restart' bundle the actual grid that the output field ('field_work' below) is created on + ! must be the same grid as forecast grid, not the output grid for this grid_id (wrtGrid(grid_id)). + ! For 'cubed_sphere_grid' these are the same, but for all other output grids (like Lambert) they are not. - ! create a grid from fcstGrid on forecast grid comp, by rebalancing distgrid to the local PETs - ! access the acceptor DistGrid - call ESMF_GridGet(fcstGrid, distgrid=acceptorDG, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! rebalance the acceptor DistGrid across the local PETs - newAcceptorDG = ESMF_DistGridCreate(acceptorDG, balanceflag=.true., rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - actualWrtGrid = ESMF_GridCreate(fcstGrid, newAcceptorDG, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! create a grid from fcstGrid on forecast grid comp, by rebalancing distgrid to the local PETs + ! access the acceptor DistGrid + call ESMF_GridGet(fcstGrid, distgrid=acceptorDG, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! 
rebalance the acceptor DistGrid across the local PETs + newAcceptorDG = ESMF_DistGridCreate(acceptorDG, balanceflag=.true., rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + actualWrtGrid = ESMF_GridCreate(fcstGrid, newAcceptorDG, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(fieldbundle, convention="NetCDF", purpose="FV3-nooutput", name="output_grid", value="restart_grid", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - else - actualWrtGrid = wrtGrid(grid_id) - call ESMF_AttributeSet(fieldbundle, convention="NetCDF", purpose="FV3-nooutput", name="output_grid", value=output_grid(grid_id), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - end if + call ESMF_AttributeSet(fieldbundle, convention="NetCDF", purpose="FV3-nooutput", name="output_grid", value="restart_grid", rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + else + actualWrtGrid = wrtGrid(grid_id) + call ESMF_AttributeSet(fieldbundle, convention="NetCDF", purpose="FV3-nooutput", name="output_grid", value=output_grid(grid_id), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + end if - do j=1, fieldCount + call ESMF_GridAddItem(actualWrtGrid, itemflag=ESMF_GRIDITEM_MASK, & + staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_FieldGet(fcstField(j), typekind=typekind, dimCount=fieldDimCount, name=fieldName, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + do j=1, fieldCount - call ESMF_GridGet(actualWrtGrid, dimCount=gridDimCount, rc=rc) ! 
use actualWrtGrid instead of wrtGrid(grid_id) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_FieldGet(fcstField(j), typekind=typekind, dimCount=fieldDimCount, name=fieldName, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(gridToFieldMap(gridDimCount)) - allocate(ungriddedLBound(fieldDimCount-gridDimCount)) - allocate(ungriddedUBound(fieldDimCount-gridDimCount)) + call ESMF_GridGet(actualWrtGrid, dimCount=gridDimCount, rc=rc) ! use actualWrtGrid instead of wrtGrid(grid_id) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_FieldGet(fcstField(j), gridToFieldMap=gridToFieldMap, & - ungriddedLBound=ungriddedLBound, ungriddedUBound=ungriddedUBound, & - staggerloc=staggerloc, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + allocate(gridToFieldMap(gridDimCount)) + allocate(ungriddedLBound(fieldDimCount-gridDimCount)) + allocate(ungriddedUBound(fieldDimCount-gridDimCount)) + + call ESMF_FieldGet(fcstField(j), gridToFieldMap=gridToFieldMap, & + ungriddedLBound=ungriddedLBound, ungriddedUBound=ungriddedUBound, & + staggerloc=staggerloc, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! if (lprnt) print *,'in wrt,fcstfld,fieldname=', & ! trim(fieldname),'fieldDimCount=',fieldDimCount,'gridDimCount=',gridDimCount, & ! 'gridToFieldMap=',gridToFieldMap,'ungriddedLBound=',ungriddedLBound, & ! 'ungriddedUBound=',ungriddedUBound,'rc=',rc - ! create the output field on output grid - field_work = ESMF_FieldCreate(actualWrtGrid, typekind, name=fieldName, & ! 
use actualWrtGrid instead of wrtGrid(grid_id) - staggerloc=staggerloc, & - gridToFieldMap=gridToFieldMap, & - ungriddedLBound=ungriddedLBound, & - ungriddedUBound=ungriddedUBound, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_AttributeCopy(fcstField(j), field_work, attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! create the output field on output grid + field_work = ESMF_FieldCreate(actualWrtGrid, typekind, name=fieldName, & ! use actualWrtGrid instead of wrtGrid(grid_id) + staggerloc=staggerloc, & + gridToFieldMap=gridToFieldMap, & + ungriddedLBound=ungriddedLBound, & + ungriddedUBound=ungriddedUBound, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! get output file name - call ESMF_AttributeGet(fcstField(j), convention="NetCDF", purpose="FV3", & - name="output_file", value=outfile_name, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call init_field_to_missing_value(field_work, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_LogWrite("bf fcstfield, get output_file "//trim(outfile_name)//" "//trim(fieldName),ESMF_LOGMSG_INFO,rc=RC) - if (trim(outfile_name) /= '') then - outfilename(j,i) = trim(outfile_name) - endif - call ESMF_LogWrite("af fcstfield, get output_file",ESMF_LOGMSG_INFO,rc=RC) + call ESMF_AttributeCopy(fcstField(j), field_work, attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! if (lprnt) print *,' i=',i,' j=',j,' outfilename=',trim(outfilename(j,i)) + ! 
get output file name + call ESMF_AttributeGet(fcstField(j), convention="NetCDF", purpose="FV3", & + name="output_file", value=outfile_name, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! add the output field to the 'output_' FieldBundle - call ESMF_FieldBundleAdd(fieldbundle, (/field_work/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_LogWrite("bf fcstfield, get output_file "//trim(outfile_name)//" "//trim(fieldName),ESMF_LOGMSG_INFO,rc=RC) + if (trim(outfile_name) /= '') then + outfilename(j,i) = trim(outfile_name) + endif + call ESMF_LogWrite("af fcstfield, get output_file",ESMF_LOGMSG_INFO,rc=RC) - ! deal with grids for which 'is_moving' is .true. - if (is_moving(grid_id)) then - ! create an empty field that will serve as acceptor for GridTransfer of fcstGrid - field_work = ESMF_FieldEmptyCreate(name=fieldName, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! if (lprnt) print *,' i=',i,' j=',j,' outfilename=',trim(outfilename(j,i)) - ! use attributes to carry information for later FieldEmptyComplete() - call ESMF_InfoGetFromHost(field_work, info=info, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - tk = typekind ! convert TypeKind_Flag to integer - call ESMF_InfoSet(info, key="typekind", value=tk, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - sloc = staggerloc ! 
convert StaggerLoc_Flag to integer - call ESMF_InfoSet(info, key="staggerloc", value=sloc, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoSet(info, key="gridToFieldMap", values=gridToFieldMap, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoSet(info, key="ungriddedLBound", values=ungriddedLBound, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoSet(info, key="ungriddedUBound", values=ungriddedUBound, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! add the output field to the 'output_' FieldBundle + call ESMF_FieldBundleAdd(fieldbundle, (/field_work/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! add to 'mirror_' FieldBundle - call ESMF_FieldBundleAdd(mirrorFB, (/field_work/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! deal with grids for which 'is_moving' is .true. + if (is_moving(grid_id)) then + ! create an empty field that will serve as acceptor for GridTransfer of fcstGrid + field_work = ESMF_FieldEmptyCreate(name=fieldName, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! use attributes to carry information for later FieldEmptyComplete() + call ESMF_InfoGetFromHost(field_work, info=info, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + tk = typekind ! convert TypeKind_Flag to integer + call ESMF_InfoSet(info, key="typekind", value=tk, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + sloc = staggerloc ! 
convert StaggerLoc_Flag to integer + call ESMF_InfoSet(info, key="staggerloc", value=sloc, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoSet(info, key="gridToFieldMap", values=gridToFieldMap, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoSet(info, key="ungriddedLBound", values=ungriddedLBound, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoSet(info, key="ungriddedUBound", values=ungriddedUBound, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! add to 'mirror_' FieldBundle + call ESMF_FieldBundleAdd(mirrorFB, (/field_work/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif + endif - ! local garbage collection - deallocate(gridToFieldMap, ungriddedLBound, ungriddedUBound) - enddo + ! local garbage collection + deallocate(gridToFieldMap, ungriddedLBound, ungriddedUBound) + enddo - call ESMF_AttributeCopy(fcstGrid, actualWrtGrid , & - attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_AttributeCopy(fcstGrid, actualWrtGrid , & + attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - deallocate(fcstField) + deallocate(fcstField) - endif !if (fieldCount > 0) then + endif !if (fieldCount > 0) then - else ! anything but a FieldBundle in the state is unexpected here - call ESMF_LogSetError(ESMF_RC_ARG_BAD, & - msg="Only FieldBundles supported in fcstState.", line=__LINE__, file=__FILE__) - return - endif + else ! 
anything but a FieldBundle in the state is unexpected here + call ESMF_LogSetError(ESMF_RC_ARG_BAD, & + msg="Only FieldBundles supported in fcstState.", line=__LINE__, file=__FILE__) + return + endif - enddo !FBCount + enddo !FBCount - !loop over all items in the imp_state_write and count output FieldBundles - call get_outfile(FBCount, outfilename, FBlist_outfilename, noutfile) - wrt_int_state%FBCount = noutfile + !loop over all items in the imp_state_write and count output FieldBundles + call get_outfile(FBCount, outfilename, FBlist_outfilename, noutfile) + wrt_int_state%FBCount = noutfile - !create output field bundles - allocate(wrt_int_state%wrtFB(wrt_int_state%FBCount)) - ! if (lprnt) write(0,*)'wrt_initialize_p1: allocated ',wrt_int_state%FBCount, ' wrt_int_state%wrtFB' + !create output field bundles + allocate(wrt_int_state%wrtFB(wrt_int_state%FBCount)) + ! if (lprnt) write(0,*)'wrt_initialize_p1: allocated ',wrt_int_state%FBCount, ' wrt_int_state%wrtFB' - do i=1, wrt_int_state%FBCount + do i=1, wrt_int_state%FBCount - wrt_int_state%wrtFB(i) = ESMF_FieldBundleCreate(name=trim(FBlist_outfilename(i)), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! if (lprnt) write(0,*)'wrt_initialize_p1: created wrtFB ',i, ' with name ', trim(FBlist_outfilename(i)) + wrt_int_state%wrtFB(i) = ESMF_FieldBundleCreate(name=trim(FBlist_outfilename(i)), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! if (lprnt) write(0,*)'wrt_initialize_p1: created wrtFB ',i, ' with name ', trim(FBlist_outfilename(i)) - ! if (lprnt) write(0,*)'wrt_initialize_p1: loop over ', FBCount, ' forecast bundles' - do n=1, FBCount + ! 
if (lprnt) write(0,*)'wrt_initialize_p1: loop over ', FBCount, ' forecast bundles' + do n=1, FBCount - call ESMF_StateGet(imp_state_write, itemName="output_"//trim(fcstItemNameList(n)), & - fieldbundle=fcstFB, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_StateGet(imp_state_write, itemName="output_"//trim(fcstItemNameList(n)), & + fieldbundle=fcstFB, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! if (lprnt) write(0,*)'wrt_initialize_p1: got forecast bundle ', "output_"//trim(fcstItemNameList(n)) - ! if (lprnt) write(0,*)'wrt_initialize_p1: is ', trim(fcstItemNameList(n)), ' == ', trim(FBlist_outfilename(i)) + ! if (lprnt) write(0,*)'wrt_initialize_p1: got forecast bundle ', "output_"//trim(fcstItemNameList(n)) + ! if (lprnt) write(0,*)'wrt_initialize_p1: is ', trim(fcstItemNameList(n)), ' == ', trim(FBlist_outfilename(i)) - if (trim_regridmethod_suffix(fcstItemNameList(n)) == trim_regridmethod_suffix(FBlist_outfilename(i))) then + if (trim_regridmethod_suffix(fcstItemNameList(n)) == trim_regridmethod_suffix(FBlist_outfilename(i))) then - ! copy the fcstfield bundle Attributes to the output field bundle - ! if (lprnt) write(0,*)'wrt_initialize_p1: copy atts/fields from ', "output_"//trim(fcstItemNameList(n)), ' to ', trim(FBlist_outfilename(i)) - call ESMF_AttributeCopy(fcstFB, wrt_int_state%wrtFB(i), & - attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) + ! copy the fcstfield bundle Attributes to the output field bundle + ! 
if (lprnt) write(0,*)'wrt_initialize_p1: copy atts/fields from ', "output_"//trim(fcstItemNameList(n)), ' to ', trim(FBlist_outfilename(i)) + call ESMF_AttributeCopy(fcstFB, wrt_int_state%wrtFB(i), & + attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeGet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", & - name="grid_id", value=grid_id, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! if (lprnt) write(0,*)'wrt_initialize_p1: got grid_id for wrtFB ', i, ' grid_id =', grid_id, trim(output_grid(grid_id)) + call ESMF_AttributeGet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", & + name="grid_id", value=grid_id, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! 
if (lprnt) write(0,*)'wrt_initialize_p1: got grid_id for wrtFB ', i, ' grid_id =', grid_id, trim(output_grid(grid_id)) - call ESMF_FieldBundleGet(fcstFB, fieldCount=fieldCount, rc=rc) + call ESMF_FieldBundleGet(fcstFB, fieldCount=fieldCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(fcstField(fieldCount),fieldnamelist(fieldCount)) - call ESMF_FieldBundleGet(fcstFB, fieldList=fcstField, fieldNameList=fieldnamelist, & - itemorderflag=ESMF_ITEMORDER_ADDORDER, rc=rc) + allocate(fcstField(fieldCount),fieldnamelist(fieldCount)) + call ESMF_FieldBundleGet(fcstFB, fieldList=fcstField, fieldNameList=fieldnamelist, & + itemorderflag=ESMF_ITEMORDER_ADDORDER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - do j=1, fieldCount + do j=1, fieldCount - call ESMF_AttributeGet(fcstField(j),convention="NetCDF", purpose="FV3", & - name='output_file',value=outfile_name, rc=rc) + call ESMF_AttributeGet(fcstField(j),convention="NetCDF", purpose="FV3", & + name='output_file',value=outfile_name, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if( trim(outfile_name) == trim(FBlist_outfilename(i))) then - call ESMF_FieldBundleAdd(wrt_int_state%wrtFB(i), (/fcstField(j)/), rc=rc) + if( trim(outfile_name) == trim(FBlist_outfilename(i))) then + call ESMF_FieldBundleAdd(wrt_int_state%wrtFB(i), (/fcstField(j)/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, 
line=__LINE__, file=__FILE__)) return + endif - enddo ! fieldCount - deallocate(fcstField, fieldnamelist) + enddo ! fieldCount + deallocate(fcstField, fieldnamelist) - endif ! index(trim(fcstItemNameList(n)),trim(FBlist_outfilename(i))) + endif ! index(trim(fcstItemNameList(n)),trim(FBlist_outfilename(i))) - enddo ! FBCount + enddo ! FBCount - enddo ! end wrt_int_state%FBCount + enddo ! end wrt_int_state%FBCount ! ! add time Attribute ! look at the importState attributes and copy those starting with "time" - call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & - attnestflag=ESMF_ATTNEST_OFF, count=attCount, rc=rc) + call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, count=attCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_LogWrite("Write component AttributeGet, attCount ", ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite("Write component AttributeGet, attCount ", ESMF_LOGMSG_INFO, rc=rc) ! prepare the lists needed to transfer attributes - allocate(attNameList(attCount), attNameList2(attCount)) - allocate(typekindList(attCount)) + allocate(attNameList(attCount), attNameList2(attCount)) + allocate(typekindList(attCount)) ! 
loop over all the attributes on importState within AttPack - j = 1 - k = 1 - do i=1, attCount - call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & - attnestflag=ESMF_ATTNEST_OFF, attributeIndex=i, name=attName, & - typekind=typekind, rc=rc) + j = 1 + k = 1 + do i=1, attCount + call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, attributeIndex=i, name=attName, & + typekind=typekind, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! test for name starting with "time" - if (index(trim(attName), "time") == 1) then + if (index(trim(attName), "time") == 1) then ! add this attribute to the list of transfers - attNameList(j) = attName - typekindList(j) = typekind - j = j + 1 - if (index(trim(attName), "time:") == 1) then - ! store names of attributes starting with "time:" for later use - attNameList2(k) = attName - k = k+1 - endif - endif - enddo + attNameList(j) = attName + typekindList(j) = typekind + j = j + 1 + if (index(trim(attName), "time:") == 1) then + ! store names of attributes starting with "time:" for later use + attNameList2(k) = attName + k = k+1 + endif + endif + enddo - do n = 1, ngrids - ! add the transfer attributes from importState to grid - call ESMF_AttributeAdd(wrtGrid(n), convention="NetCDF", purpose="FV3", & - attrList=attNameList(1:j-1), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + do n = 1, ngrids + ! add the transfer attributes from importState to grid + call ESMF_AttributeAdd(wrtGrid(n), convention="NetCDF", purpose="FV3", & + attrList=attNameList(1:j-1), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! 
add the transfer attributes from importState to special cubed_sphere grid - if (n == 1 .and. top_parent_is_global .and. history_file_on_native_grid) then - call ESMF_AttributeAdd(wrtGrid_cubed_sphere, convention="NetCDF", purpose="FV3", & - attrList=attNameList(1:j-1), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif + ! add the transfer attributes from importState to special cubed_sphere grid + if (n == 1 .and. top_parent_is_global .and. history_file_on_native_grid) then + call ESMF_AttributeAdd(wrtGrid_cubed_sphere, convention="NetCDF", purpose="FV3", & + attrList=attNameList(1:j-1), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + endif ! loop over the added attributes, access the value (only scalar allowed), ! and set them on the grid - do i=1, j-1 - if (typekindList(i) == ESMF_TYPEKIND_CHARACTER) then - call ESMF_AttributeGet(imp_state_write, & - convention="NetCDF", purpose="FV3", & - name=trim(attNameList(i)), value=valueS, rc=rc) + do i=1, j-1 + if (typekindList(i) == ESMF_TYPEKIND_CHARACTER) then + call ESMF_AttributeGet(imp_state_write, & + convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueS, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! 
save calendar_type (as integer) for use in 'coupler.res' - if (index(trim(attNameList(i)),'time:calendar') > 0) then - select case( fms_mpp_uppercase(trim(valueS)) ) - case( 'JULIAN' ) - calendar_type = JULIAN - case( 'GREGORIAN' ) - calendar_type = GREGORIAN - case( 'NOLEAP' ) - calendar_type = NOLEAP - case( 'THIRTY_DAY' ) - calendar_type = THIRTY_DAY_MONTHS - case( 'NO_CALENDAR' ) - calendar_type = NO_CALENDAR - case default - call fms_mpp_error ( FATAL, 'fcst_initialize: calendar must be one of '// & - 'JULIAN|GREGORIAN|NOLEAP|THIRTY_DAY|NO_CALENDAR.' ) - end select - endif + if (index(trim(attNameList(i)),'time:calendar') > 0) then + select case( fms_mpp_uppercase(trim(valueS)) ) + case( 'JULIAN' ) + calendar_type = JULIAN + case( 'GREGORIAN' ) + calendar_type = GREGORIAN + case( 'NOLEAP' ) + calendar_type = NOLEAP + case( 'THIRTY_DAY' ) + calendar_type = THIRTY_DAY_MONTHS + case( 'NO_CALENDAR' ) + calendar_type = NO_CALENDAR + case default + call fms_mpp_error ( FATAL, 'fcst_initialize: calendar must be one of '// & + 'JULIAN|GREGORIAN|NOLEAP|THIRTY_DAY|NO_CALENDAR.' ) + end select + endif ! update the time:units when idate on write grid component is changed - if (index(trim(attNameList(i)),'time:units') > 0) then - if ( change_wrtidate ) then - idx = index(trim(valueS),' since ') - if(lprnt) print *,'in write grid comp, time:unit=',trim(valueS) - write(newdate,'(I4.4,a,I2.2,a,I2.2,a,I2.2,a,I2.2,a,I2.2)') idate(1),'-', & - idate(2),'-',idate(3),' ',idate(4),':',idate(5),':',idate(6) - valueS = valueS(1:idx+6)//newdate - if(lprnt) print *,'in write grid comp, new time:unit=',trim(valueS) - endif - endif - call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & - name=trim(attNameList(i)), value=valueS, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - if (n == 1 .and. top_parent_is_global .and. 
history_file_on_native_grid) then - call ESMF_AttributeSet(wrtGrid_cubed_sphere, convention="NetCDF", purpose="FV3", & - name=trim(attNameList(i)), value=valueS, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif + if (index(trim(attNameList(i)),'time:units') > 0) then + if ( change_wrtidate ) then + idx = index(trim(valueS),' since ') + if(lprnt) print *,'in write grid comp, time:unit=',trim(valueS) + write(newdate,'(I4.4,a,I2.2,a,I2.2,a,I2.2,a,I2.2,a,I2.2)') idate(1),'-', & + idate(2),'-',idate(3),' ',idate(4),':',idate(5),':',idate(6) + valueS = valueS(1:idx+6)//newdate + if(lprnt) print *,'in write grid comp, new time:unit=',trim(valueS) + endif + endif + call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueS, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (n == 1 .and. top_parent_is_global .and. 
history_file_on_native_grid) then + call ESMF_AttributeSet(wrtGrid_cubed_sphere, convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueS, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + endif - else if (typekindList(i) == ESMF_TYPEKIND_I4) then - call ESMF_AttributeGet(imp_state_write, & - convention="NetCDF", purpose="FV3", & - name=trim(attNameList(i)), value=valueI4, rc=rc) + else if (typekindList(i) == ESMF_TYPEKIND_I4) then + call ESMF_AttributeGet(imp_state_write, & + convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueI4, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & - name=trim(attNameList(i)), value=valueI4, rc=rc) + call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueI4, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - else if (typekindList(i) == ESMF_TYPEKIND_R4) then - call ESMF_AttributeGet(imp_state_write, & - convention="NetCDF", purpose="FV3", & - name=trim(attNameList(i)), value=valueR4, rc=rc) + else if (typekindList(i) == ESMF_TYPEKIND_R4) then + call ESMF_AttributeGet(imp_state_write, & + convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueR4, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & - 
name=trim(attNameList(i)), value=valueR4, rc=rc) + call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueR4, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - else if (typekindList(i) == ESMF_TYPEKIND_R8) then - call ESMF_AttributeGet(imp_state_write, & - convention="NetCDF", purpose="FV3", & - name=trim(attNameList(i)), value=valueR8, rc=rc) + else if (typekindList(i) == ESMF_TYPEKIND_R8) then + call ESMF_AttributeGet(imp_state_write, & + convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueR8, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & - name=trim(attNameList(i)), value=valueR8, rc=rc) + call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & + name=trim(attNameList(i)), value=valueR8, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif - enddo + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + endif + enddo ! Add special attribute that holds names of "time" related attributes ! for faster access during Run(). 
- call ESMF_AttributeAdd(wrtGrid(n), convention="NetCDF", purpose="FV3", & - attrList=(/"TimeAttributes"/), rc=rc) + call ESMF_AttributeAdd(wrtGrid(n), convention="NetCDF", purpose="FV3", & + attrList=(/"TimeAttributes"/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & - name="TimeAttributes", valueList=attNameList2(1:k-1), rc=rc) + call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", & + name="TimeAttributes", valueList=attNameList2(1:k-1), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! !*** create temporary field bundle for axes information ! write the Grid coordinate arrays into the output files via temporary FB - gridFB = ESMF_FieldBundleCreate(rc=rc) + gridFB = ESMF_FieldBundleCreate(rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, & - staggerloc=ESMF_STAGGERLOC_CENTER, array=array, rc=rc) + call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, & + staggerloc=ESMF_STAGGERLOC_CENTER, array=array, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - field = 
ESMF_FieldCreate(wrtGrid(n), array, name="grid_xt", rc=rc) + field = ESMF_FieldCreate(wrtGrid(n), array, name="grid_xt", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return !add attribute info ! long name - call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & - attrList=(/'long_name'/), rc=rc) + call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & + attrList=(/'long_name'/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='long_name', & - value="T-cell longitude", rc=rc) + call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='long_name', & + value="T-cell longitude", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! 
units - call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & - attrList=(/'units'/), rc=rc) + call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & + attrList=(/'units'/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='units', & - value="degrees_E", rc=rc) + call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='units', & + value="degrees_E", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! cartesian_axis - call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & - attrList=(/'cartesian_axis'/), rc=rc) + call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & + attrList=(/'cartesian_axis'/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='cartesian_axis', & - value="X", rc=rc) + call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='cartesian_axis', & + value="X", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! 
add field to bundle - call ESMF_FieldBundleAdd(gridFB, (/field/), rc=rc) + call ESMF_FieldBundleAdd(gridFB, (/field/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! ! get 2nd dimension - call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, & - staggerloc=ESMF_STAGGERLOC_CENTER, array=array, rc=rc) + call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, & + staggerloc=ESMF_STAGGERLOC_CENTER, array=array, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - field = ESMF_FieldCreate(wrtGrid(n), array, name="grid_yt", rc=rc) + field = ESMF_FieldCreate(wrtGrid(n), array, name="grid_yt", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return !add attribute info ! 
long name - call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & - attrList=(/'long_name'/), rc=rc) + call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & + attrList=(/'long_name'/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='long_name', & - value="T-cell latitude", rc=rc) + call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='long_name', & + value="T-cell latitude", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! units - call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & - attrList=(/'units'/), rc=rc) + call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & + attrList=(/'units'/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='units', & - value="degrees_N", rc=rc) + call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='units', & + value="degrees_N", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! 
cartesian_axis - call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & - attrList=(/'cartesian_axis'/), rc=rc) + call ESMF_AttributeAdd(field,convention="NetCDF",purpose="FV3", & + attrList=(/'cartesian_axis'/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='cartesian_axis', & - value="Y", rc=rc) + call ESMF_AttributeSet(field,convention="NetCDF",purpose="FV3",name='cartesian_axis', & + value="Y", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_FieldBundleAdd(gridFB, (/field/), rc=rc) + call ESMF_FieldBundleAdd(gridFB, (/field/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - end do ! n=1, ngrids + end do ! n=1, ngrids - deallocate(attNameList, attNameList2, typekindList) + deallocate(attNameList, attNameList2, typekindList) ! ! write_init_tim = MPI_Wtime() - btim0 ! !----------------------------------------------------------------------- ! - end subroutine wrt_initialize_p1 + end subroutine wrt_initialize_p1 ! !----------------------------------------------------------------------- !####################################################################### !----------------------------------------------------------------------- ! - subroutine wrt_initialize_p2(wrt_comp, imp_state_write, exp_state_write, clock, rc) + subroutine wrt_initialize_p2(wrt_comp, imp_state_write, exp_state_write, clock, rc) ! 
!----------------------------------------------------------------------- !*** INITIALIZE THE WRITE GRIDDED COMPONENT. !----------------------------------------------------------------------- ! - type(esmf_GridComp) :: wrt_comp - type(ESMF_State) :: imp_state_write, exp_state_write - type(esmf_Clock) :: clock - integer,intent(out) :: rc + type(esmf_GridComp) :: wrt_comp + type(ESMF_State) :: imp_state_write, exp_state_write + type(esmf_Clock) :: clock + integer,intent(out) :: rc ! !*** LOCAL VARIABLES - type(ESMF_Info) :: info - logical, allocatable :: is_moving(:) - type(ESMF_StateItem_Flag), allocatable :: itemTypeList(:) - character(len=ESMF_MAXSTR),allocatable :: itemNameList(:) - integer :: i, j, bundleCount, fieldCount - type(ESMF_FieldBundle) :: mirrorFB - type(ESMF_Field), allocatable :: fieldList(:) - type(ESMF_Grid) :: grid - integer :: sloc - type(ESMF_StaggerLoc) :: staggerloc - type(ESMF_DistGrid) :: acceptorDG, newAcceptorDG + type(ESMF_Info) :: info + logical, allocatable :: is_moving(:) + type(ESMF_StateItem_Flag), allocatable :: itemTypeList(:) + character(len=ESMF_MAXSTR),allocatable :: itemNameList(:) + integer :: i, j, bundleCount, fieldCount + type(ESMF_FieldBundle) :: mirrorFB + type(ESMF_Field), allocatable :: fieldList(:) + type(ESMF_Grid) :: grid + integer :: sloc + type(ESMF_StaggerLoc) :: staggerloc + type(ESMF_DistGrid) :: acceptorDG, newAcceptorDG ! ! !----------------------------------------------------------------------- !*********************************************************************** !----------------------------------------------------------------------- ! - rc = ESMF_SUCCESS + rc = ESMF_SUCCESS ! 
- call ESMF_InfoGetFromHost(imp_state_write, info=info, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoGetAlloc(info, key="is_moving", values=is_moving, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_StateGet(imp_state_write, itemCount=bundleCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - allocate(itemNameList(bundleCount), itemTypeList(bundleCount)) + call ESMF_InfoGetFromHost(imp_state_write, info=info, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGetAlloc(info, key="is_moving", values=is_moving, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_StateGet(imp_state_write, itemNameList=itemNameList, & - itemTypeList=itemTypeList, & - !itemorderflag=ESMF_ITEMORDER_ADDORDER, & - rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_StateGet(imp_state_write, itemCount=bundleCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - do i=1, bundleCount + allocate(itemNameList(bundleCount), itemTypeList(bundleCount)) - if (itemTypeList(i) == ESMF_STATEITEM_FIELDBUNDLE) then + call ESMF_StateGet(imp_state_write, itemNameList=itemNameList, & + itemTypeList=itemTypeList, & + !itemorderflag=ESMF_ITEMORDER_ADDORDER, & + rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if (index(trim(itemNameList(i)), "mirror_")==1) then - ! 
this is a 'mirror_' FieldBundle -> GridTransfer acceptor side - call ESMF_StateGet(imp_state_write, itemName=trim(itemNameList(i)), fieldbundle=mirrorFB, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! access the grid that is passed in from the provider side - call ESMF_FieldBundleGet(mirrorFB, grid=grid, fieldCount=fieldCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! access the acceptor DistGrid - call ESMF_GridGet(grid, distgrid=acceptorDG, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! rebalance the acceptor DistGrid across the local PETs - newAcceptorDG = ESMF_DistGridCreate(acceptorDG, balanceflag=.true., rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! create a new Grid on the rebalanced DistGrid - grid = ESMF_GridCreate(newAcceptorDG, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! point all of the acceptor fields to the new acceptor Grid - allocate(fieldList(fieldCount)) - call ESMF_FieldBundleGet(mirrorFB, fieldList=fieldList, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - do j=1, fieldCount - ! first access information stored on the field needed for completion - call ESMF_InfoGetFromHost(fieldList(j), info=info, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoGet(info, key="staggerloc", value=sloc, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - staggerloc = sloc ! 
convert integer into StaggerLoc_Flag - call ESMF_FieldEmptySet(fieldList(j), grid=grid, staggerloc=staggerloc, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - enddo - ! clean-up - deallocate(fieldList) - endif + do i=1, bundleCount + + if (itemTypeList(i) == ESMF_STATEITEM_FIELDBUNDLE) then + + if (index(trim(itemNameList(i)), "mirror_")==1) then + ! this is a 'mirror_' FieldBundle -> GridTransfer acceptor side + call ESMF_StateGet(imp_state_write, itemName=trim(itemNameList(i)), fieldbundle=mirrorFB, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! access the grid that is passed in from the provider side + call ESMF_FieldBundleGet(mirrorFB, grid=grid, fieldCount=fieldCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! access the acceptor DistGrid + call ESMF_GridGet(grid, distgrid=acceptorDG, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! rebalance the acceptor DistGrid across the local PETs + newAcceptorDG = ESMF_DistGridCreate(acceptorDG, balanceflag=.true., rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! create a new Grid on the rebalanced DistGrid + grid = ESMF_GridCreate(newAcceptorDG, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! point all of the acceptor fields to the new acceptor Grid + allocate(fieldList(fieldCount)) + call ESMF_FieldBundleGet(mirrorFB, fieldList=fieldList, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + do j=1, fieldCount + ! 
first access information stored on the field needed for completion + call ESMF_InfoGetFromHost(fieldList(j), info=info, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGet(info, key="staggerloc", value=sloc, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + staggerloc = sloc ! convert integer into StaggerLoc_Flag + call ESMF_FieldEmptySet(fieldList(j), grid=grid, staggerloc=staggerloc, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + enddo + ! clean-up + deallocate(fieldList) + endif - else ! anything but a FieldBundle in the state is unexpected here - call ESMF_LogSetError(ESMF_RC_ARG_BAD, msg="Only FieldBundles supported in fcstState.", line=__LINE__, file=__FILE__) - return - endif + else ! anything but a FieldBundle in the state is unexpected here + call ESMF_LogSetError(ESMF_RC_ARG_BAD, msg="Only FieldBundles supported in fcstState.", line=__LINE__, file=__FILE__) + return + endif - enddo + enddo !----------------------------------------------------------------------- ! - end subroutine wrt_initialize_p2 + end subroutine wrt_initialize_p2 ! !----------------------------------------------------------------------- !####################################################################### !----------------------------------------------------------------------- ! - subroutine wrt_initialize_p3(wrt_comp, imp_state_write, exp_state_write, clock, rc) + subroutine wrt_initialize_p3(wrt_comp, imp_state_write, exp_state_write, clock, rc) ! !----------------------------------------------------------------------- !*** INITIALIZE THE WRITE GRIDDED COMPONENT. !----------------------------------------------------------------------- ! 
- type(esmf_GridComp) :: wrt_comp - type(ESMF_State) :: imp_state_write, exp_state_write - type(esmf_Clock) :: clock - integer,intent(out) :: rc + type(esmf_GridComp) :: wrt_comp + type(ESMF_State) :: imp_state_write, exp_state_write + type(esmf_Clock) :: clock + integer,intent(out) :: rc !*** LOCAL VARIABLES - type(ESMF_Info) :: info - logical, allocatable :: is_moving(:) - type(ESMF_StateItem_Flag), allocatable :: itemTypeList(:) - character(len=ESMF_MAXSTR),allocatable :: itemNameList(:) - integer :: i, j, bundleCount, fieldCount - type(ESMF_FieldBundle) :: mirrorFB - type(ESMF_Field), allocatable :: fieldList(:) - type(ESMF_TypeKind_Flag) :: typekind - integer :: tk - integer, allocatable :: gridToFieldMap(:) - integer, allocatable :: ungriddedLBound(:) - integer, allocatable :: ungriddedUBound(:) + type(ESMF_Info) :: info + logical, allocatable :: is_moving(:) + type(ESMF_StateItem_Flag), allocatable :: itemTypeList(:) + character(len=ESMF_MAXSTR),allocatable :: itemNameList(:) + integer :: i, j, bundleCount, fieldCount + type(ESMF_FieldBundle) :: mirrorFB + type(ESMF_Field), allocatable :: fieldList(:) + type(ESMF_TypeKind_Flag) :: typekind + integer :: tk + integer, allocatable :: gridToFieldMap(:) + integer, allocatable :: ungriddedLBound(:) + integer, allocatable :: ungriddedUBound(:) ! ! @@ -1578,501 +1587,499 @@ subroutine wrt_initialize_p3(wrt_comp, imp_state_write, exp_state_write, clock, !*********************************************************************** !----------------------------------------------------------------------- ! - rc = ESMF_SUCCESS + rc = ESMF_SUCCESS ! 
- call ESMF_InfoGetFromHost(imp_state_write, info=info, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoGetAlloc(info, key="is_moving", values=is_moving, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_StateGet(imp_state_write, itemCount=bundleCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - allocate(itemNameList(bundleCount), itemTypeList(bundleCount)) + call ESMF_InfoGetFromHost(imp_state_write, info=info, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGetAlloc(info, key="is_moving", values=is_moving, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_StateGet(imp_state_write, itemNameList=itemNameList, & - itemTypeList=itemTypeList, & - !itemorderflag=ESMF_ITEMORDER_ADDORDER, & - rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_StateGet(imp_state_write, itemCount=bundleCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - do i=1, bundleCount + allocate(itemNameList(bundleCount), itemTypeList(bundleCount)) - if (itemTypeList(i) == ESMF_STATEITEM_FIELDBUNDLE) then + call ESMF_StateGet(imp_state_write, itemNameList=itemNameList, & + itemTypeList=itemTypeList, & + !itemorderflag=ESMF_ITEMORDER_ADDORDER, & + rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if (index(trim(itemNameList(i)), "mirror_")==1) then - ! 
this is a 'mirror_' FieldBundle -> GridTransfer acceptor side - call ESMF_StateGet(imp_state_write, itemName=trim(itemNameList(i)), fieldbundle=mirrorFB, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! finish creating all the mirror Fields - call ESMF_FieldBundleGet(mirrorFB, fieldCount=fieldCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(fieldList(fieldCount)) - call ESMF_FieldBundleGet(mirrorFB, fieldList=fieldList, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - do j=1, fieldCount - ! first access information stored on the field needed for completion - call ESMF_InfoGetFromHost(fieldList(j), info=info, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoGet(info, key="typekind", value=tk, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - typekind = tk ! convert integer into TypeKind_Flag - call ESMF_InfoGetAlloc(info, key="gridToFieldMap", values=gridToFieldMap, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoGetAlloc(info, key="ungriddedLBound", values=ungriddedLBound, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoGetAlloc(info, key="ungriddedUBound", values=ungriddedUBound, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! 
now complete the field creation - call ESMF_FieldEmptyComplete(fieldList(j), typekind=typekind, gridToFieldMap=gridToFieldMap, & - ungriddedLBound=ungriddedLBound, ungriddedUBound=ungriddedUBound, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - enddo - ! clean-up - deallocate(fieldList) - endif + do i=1, bundleCount + + if (itemTypeList(i) == ESMF_STATEITEM_FIELDBUNDLE) then + + if (index(trim(itemNameList(i)), "mirror_")==1) then + ! this is a 'mirror_' FieldBundle -> GridTransfer acceptor side + call ESMF_StateGet(imp_state_write, itemName=trim(itemNameList(i)), fieldbundle=mirrorFB, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! finish creating all the mirror Fields + call ESMF_FieldBundleGet(mirrorFB, fieldCount=fieldCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + allocate(fieldList(fieldCount)) + call ESMF_FieldBundleGet(mirrorFB, fieldList=fieldList, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + do j=1, fieldCount + ! first access information stored on the field needed for completion + call ESMF_InfoGetFromHost(fieldList(j), info=info, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGet(info, key="typekind", value=tk, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + typekind = tk ! 
convert integer into TypeKind_Flag + call ESMF_InfoGetAlloc(info, key="gridToFieldMap", values=gridToFieldMap, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGetAlloc(info, key="ungriddedLBound", values=ungriddedLBound, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGetAlloc(info, key="ungriddedUBound", values=ungriddedUBound, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! now complete the field creation + call ESMF_FieldEmptyComplete(fieldList(j), typekind=typekind, gridToFieldMap=gridToFieldMap, & + ungriddedLBound=ungriddedLBound, ungriddedUBound=ungriddedUBound, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + enddo + ! clean-up + deallocate(fieldList) + endif - else ! anything but a FieldBundle in the state is unexpected here - call ESMF_LogSetError(ESMF_RC_ARG_BAD, msg="Only FieldBundles supported in fcstState.", line=__LINE__, file=__FILE__) - return - endif + else ! anything but a FieldBundle in the state is unexpected here + call ESMF_LogSetError(ESMF_RC_ARG_BAD, msg="Only FieldBundles supported in fcstState.", line=__LINE__, file=__FILE__) + return + endif - enddo + enddo !----------------------------------------------------------------------- ! - end subroutine wrt_initialize_p3 + end subroutine wrt_initialize_p3 ! !----------------------------------------------------------------------- !####################################################################### !----------------------------------------------------------------------- ! - subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc) + subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc) ! 
!----------------------------------------------------------------------- !*** the run step for the write gridded component. !----------------------------------------------------------------------- ! - type(ESMF_GridComp) :: wrt_comp - type(ESMF_State) :: imp_state_write, exp_state_write - type(ESMF_Clock) :: clock - integer,intent(out) :: rc + type(ESMF_GridComp) :: wrt_comp + type(ESMF_State) :: imp_state_write, exp_state_write + type(ESMF_Clock) :: clock + integer,intent(out) :: rc ! !----------------------------------------------------------------------- !*** local variables ! - type(ESMF_VM) :: VM - type(ESMF_FieldBundle) :: file_bundle, mirror_bundle - type(ESMF_StateItem_Flag) :: itemType - type(ESMF_Time) :: currtime - type(ESMF_TimeInterval) :: io_currtimediff - type(ESMF_Grid) :: fbgrid, wrtGrid - type(ESMF_State),save :: stateGridFB - type(optimizeT), save :: optimize(40) ! FIXME - type(ESMF_GridComp), save, allocatable :: compsGridFB(:) - type(ESMF_RouteHandle) :: rh - type(ESMF_RegridMethod_Flag) :: regridmethod - integer :: srcTermProcessing -! - type(write_wrap) :: wrap - type(wrt_internal_state),pointer :: wrt_int_state -! - integer :: i,j,n,m, mype,nolog, grid_id, localPet -! - integer :: nf_hours,nf_seconds,nf_minutes - integer :: fcst_seconds - real(ESMF_KIND_R8) :: nfhour -! - integer :: nbdl, cdate(6), ndig, nnnn - integer :: step=1 - integer :: out_phase -! - logical :: opened - logical :: lmask_fields -! - character(esmf_maxstr) :: filename,compname,wrtFBName,traceString - character(40) :: cfhour, cform - character(20) :: time_iso - character(15) :: time_restart - character(15) :: tile_id -! 
- type(ESMF_Grid) :: grid - type(ESMF_Info) :: info - real(ESMF_KIND_R8), allocatable :: values(:) - character(160) :: msgString - type(ESMF_Field), allocatable :: fieldList(:) - type(ESMF_Array) :: coordArray(2) - type(ESMF_DistGrid) :: coordDG - type(ESMF_DELayout) :: coordDL - integer :: fieldCount, deCount, rootPet - integer :: minIndexPTile(2,1), maxIndexPTile(2,1), centerIndex(2) - integer, allocatable :: minIndexPDe(:,:), maxIndexPDe(:,:), petMap(:) - real(ESMF_KIND_R8), pointer :: farrayPtr(:,:) - real(ESMF_KIND_R8) :: centerCoord(2) - - integer :: ii, jj - real(ESMF_KIND_R8), pointer :: lonPtr(:,:), latPtr(:,:) - real(ESMF_KIND_R8) :: rot_lon, rot_lat - real(ESMF_KIND_R8) :: geo_lon, geo_lat - real(ESMF_KIND_R8), parameter :: rtod=180.0/pi - - real(kind=8) :: MPI_Wtime - real(kind=8) :: tbeg - real(kind=8) :: wbeg,wend - - logical :: use_parallel_netcdf - real, allocatable :: output_fh(:) - logical :: is_restart_bundle, restart_written - logical :: lupp_history, lrestart - integer :: tileCount - type(ESMF_Info) :: fcstInfo, wrtInfo - character(len=ESMF_MAXSTR) :: output_grid_name + type(ESMF_VM) :: VM + type(ESMF_FieldBundle) :: file_bundle, mirror_bundle + type(ESMF_StateItem_Flag) :: itemType + type(ESMF_Time) :: currtime + type(ESMF_TimeInterval) :: io_currtimediff + type(ESMF_Grid) :: fbgrid, wrtGrid + type(ESMF_State),save :: stateGridFB + type(optimizeT), save :: optimize(40) ! FIXME + type(ESMF_GridComp), save, allocatable :: compsGridFB(:) + type(ESMF_RouteHandle) :: rh + type(ESMF_RegridMethod_Flag) :: regridmethod + integer :: srcTermProcessing +! + type(write_wrap) :: wrap + type(wrt_internal_state),pointer :: wrt_int_state +! + integer :: i,j,n,m, mype,nolog, grid_id, localPet +! + integer :: nf_hours,nf_seconds,nf_minutes + integer :: fcst_seconds + real(ESMF_KIND_R8) :: nfhour +! + integer :: nbdl, cdate(6), ndig, nnnn + integer :: step=1 + integer :: out_phase +! + logical :: opened +! 
+ character(esmf_maxstr) :: filename,compname,wrtFBName,traceString + character(40) :: cfhour, cform + character(20) :: time_iso + character(15) :: time_restart + character(15) :: tile_id +! + type(ESMF_Grid) :: grid + type(ESMF_Info) :: info + real(ESMF_KIND_R8), allocatable :: values(:) + character(160) :: msgString + type(ESMF_Field), allocatable :: fieldList(:) + type(ESMF_Array) :: coordArray(2) + type(ESMF_DistGrid) :: coordDG + type(ESMF_DELayout) :: coordDL + integer :: fieldCount, deCount, rootPet + integer :: minIndexPTile(2,1), maxIndexPTile(2,1), centerIndex(2) + integer, allocatable :: minIndexPDe(:,:), maxIndexPDe(:,:), petMap(:) + real(ESMF_KIND_R8), pointer :: farrayPtr(:,:) + real(ESMF_KIND_R8) :: centerCoord(2) + + integer :: ii, jj + real(ESMF_KIND_R8), pointer :: lonPtr(:,:), latPtr(:,:) + real(ESMF_KIND_R8) :: rot_lon, rot_lat + real(ESMF_KIND_R8) :: geo_lon, geo_lat + real(ESMF_KIND_R8), parameter :: rtod=180.0/pi + + real(kind=8) :: tbeg + real(kind=8) :: wbeg,wend + + logical :: use_parallel_netcdf + real, allocatable :: output_fh(:) + logical :: is_restart_bundle, restart_written + logical :: lupp_history, lrestart + integer :: tileCount + type(ESMF_Info) :: fcstInfo, wrtInfo + character(len=ESMF_MAXSTR) :: output_grid_name + + type(ESMF_Grid) :: src_grid, dst_grid + type(ESMF_Field) :: dst_field_mask ! !----------------------------------------------------------------------- !*********************************************************************** !----------------------------------------------------------------------- ! - tbeg = MPI_Wtime() - rc = esmf_success + tbeg = MPI_Wtime() + rc = esmf_success ! !----------------------------------------------------------------------- !*** get the current write grid comp name, and internal state ! 
- call ESMF_GridCompGet(wrt_comp, name=compname, localPet=localPet, rc=rc) + call ESMF_GridCompGet(wrt_comp, name=compname, localPet=localPet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! Provide log message indicating which wrtComp is active - call ESMF_LogWrite("Write component activated: "//trim(compname), & - ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite("Write component activated: "//trim(compname), & + ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! access the internal state - call ESMF_GridCompGetInternalState(wrt_Comp, wrap, rc) + call ESMF_GridCompGetInternalState(wrt_Comp, wrap, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - wrt_int_state => wrap%write_int_state + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + wrt_int_state => wrap%write_int_state - call ESMF_VMGetCurrent(VM,rc=RC) + call ESMF_VMGetCurrent(VM,rc=RC) - mype = wrt_int_state%mype + mype = wrt_int_state%mype ! 
print *,'in wrt run, mype=',mype,'lead_write_task=',lead_write_task - call ESMF_InfoGetFromHost(imp_state_write, info=info, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_InfoGetAlloc(info, key="output_fh", values=output_fh, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGetFromHost(imp_state_write, info=info, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_InfoGetAlloc(info, key="output_fh", values=output_fh, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! !----------------------------------------------------------------------- !*** get current time and elapsed forecast time - call ESMF_ClockGet(clock=CLOCK, currTime=currTime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_ClockGet(clock=CLOCK, currTime=currTime, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_TimeGet(time=currTime,yy=cdate(1),mm=cdate(2),dd=cdate(3), & - h=cdate(4), m=cdate(5), s=cdate(6),rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_TimeGet(time=currTime,yy=cdate(1),mm=cdate(2),dd=cdate(3), & + h=cdate(4), m=cdate(5), s=cdate(6),rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - wrt_int_state%fdate(7) = 1 - wrt_int_state%fdate(1:6) = cdate(1:6) - write(time_iso,'(I4,"-",I2.2,"-",I2.2,"T",I2.2,":",I2.2,":",I2.2,"Z")') cdate(1:6) + wrt_int_state%fdate(7) = 1 + wrt_int_state%fdate(1:6) = cdate(1:6) + write(time_iso,'(I4,"-",I2.2,"-",I2.2,"T",I2.2,":",I2.2,":",I2.2,"Z")') cdate(1:6) - io_currtimediff = currtime - 
wrt_int_state%IO_BASETIME + io_currtimediff = currtime - wrt_int_state%IO_BASETIME - call ESMF_TimeIntervalGet(timeinterval=io_currtimediff & - ,h_r8=nfhour,h=nf_hours,m=nf_minutes,s=nf_seconds,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_TimeIntervalGet(timeinterval=io_currtimediff & + ,h_r8=nfhour,h=nf_hours,m=nf_minutes,s=nf_seconds,rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if (nf_hours < 0) return - ! - !*** set up output time on write grid comp: - ! - call ESMF_TimeIntervalGet(timeinterval=io_currtimediff, s=fcst_seconds, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - ! fcst_seconds is number of seconds in io_currtimediff, which is time interval between currenttime and io_basetime. - ! io_basetime has been adjusted by iau_offset in initialize phase. - ! Since output_fh and frestart and NOT adjusted by iau_offset, in order to compare - ! them with fcst_seconds, we must also adjust fcst_seconds by iau_offset - if (iau_offset > 0) then - fcst_seconds = fcst_seconds + iau_offset*3600 - endif - ! - !--- set up logic variable to run upp/history and restart - ! - lupp_history = .false. - lrestart = .false. - if ( ANY(nint(output_fh(:)*3600) == fcst_seconds) ) lupp_history = .true. - if ( ANY(frestart(:) == fcst_seconds) ) lrestart = .true. - if ( .not. (lupp_history .or. lrestart ) ) return - ! - !--- set up current forecast hours - ! - if (lflname_fulltime) then - ndig = max(log10(nf_hours+0.5)+1., 3.) - write(cform, '("(I",I1,".",I1,",A1,I2.2,A1,I2.2)")') ndig, ndig - write(cfhour, cform) nf_hours,'-',nf_minutes,'-',nf_seconds - else - ndig = max(log10(nf_hours+0.5)+1., 3.) - write(cform, '("(I",I1,".",I1,")")') ndig, ndig - write(cfhour, cform) nf_hours - endif + if (nf_hours < 0) return + ! + !*** set up output time on write grid comp: + ! 
+ call ESMF_TimeIntervalGet(timeinterval=io_currtimediff, s=fcst_seconds, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! fcst_seconds is number of seconds in io_currtimediff, which is time interval between currenttime and io_basetime. + ! io_basetime has been adjusted by iau_offset in initialize phase. + ! Since output_fh and frestart and NOT adjusted by iau_offset, in order to compare + ! them with fcst_seconds, we must also adjust fcst_seconds by iau_offset + if (iau_offset > 0) then + fcst_seconds = fcst_seconds + iau_offset*3600 + endif + ! + !--- set up logic variable to run upp/history and restart + ! + lupp_history = .false. + lrestart = .false. + if ( ANY(nint(output_fh(:)*3600) == fcst_seconds) ) lupp_history = .true. + if ( ANY(frestart(:) == fcst_seconds) ) lrestart = .true. + if ( .not. (lupp_history .or. lrestart ) ) return + ! + !--- set up current forecast hours + ! + if (lflname_fulltime) then + ndig = max(log10(nf_hours+0.5)+1., 3.) + write(cform, '("(I",I1,".",I1,",A1,I2.2,A1,I2.2)")') ndig, ndig + write(cfhour, cform) nf_hours,'-',nf_minutes,'-',nf_seconds + else + ndig = max(log10(nf_hours+0.5)+1., 3.) + write(cform, '("(I",I1,".",I1,")")') ndig, ndig + write(cfhour, cform) nf_hours + endif ! - if(lprnt) print *,'in wrt run, cdate=',cdate(1:4),'fcst_seconds=',fcst_seconds/3600.,'nfhour=',nfhour,& - 'lupp_history=', lupp_history, 'lrestart=',lrestart,'write grid comp not return,cfhour=',trim(cfhour) + if(lprnt) print *,'in wrt run, cdate=',cdate(1:4),'fcst_seconds=',fcst_seconds/3600.,'nfhour=',nfhour,& + 'lupp_history=', lupp_history, 'lrestart=',lrestart,'write grid comp not return,cfhour=',trim(cfhour) ! ! !----------------------------------------------------------------------- !*** loop on the "output_" FieldBundles, i.e. 
files that need to write out !----------------------------------------------------------------------- - do i=1, FBCount - call ESMF_StateGet(imp_state_write, itemName="output_"//trim(fcstItemNameList(i)), & - fieldbundle=file_bundle, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - do m=1, wrt_int_state%FBCount - if (trim_regridmethod_suffix(fcstItemNameList(i)) == trim_regridmethod_suffix(FBlist_outfilename(m))) then - - call ESMF_InfoGetFromHost(file_bundle, info=fcstInfo, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_InfoGetFromHost(wrt_int_state%wrtFB(m), info=wrtInfo, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_InfoUpdate(lhs=wrtInfo, rhs=fcstInfo, recursive=.true., overwrite=.true., rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! if (lprnt) call print_att_list(wrt_int_state%wrtFB(m), rc) - - end if - end do - - ! see whether a "mirror_" FieldBundle exists, i.e. dealing with moving domain that needs updated Regrid() here. - call ESMF_StateGet(imp_state_write, itemName="mirror_"//trim(fcstItemNameList(i)), & - itemType=itemType, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - if (itemType == ESMF_STATEITEM_FIELDBUNDLE) then - ! Regrid() for a moving domain - call ESMF_LogWrite("Regrid() for moving domain: mirror_"//trim(fcstItemNameList(i)), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_StateGet(imp_state_write, itemName="mirror_"//trim(fcstItemNameList(i)), & - fieldbundle=mirror_bundle, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - ! 
if (fcstItemNameList(i)(1:8) == "restart_" .or. fcstItemNameList(i)(1:18) == 'cubed_sphere_grid_') then - if (fcstItemNameList(i)(1:8) == "restart_") then - ! restart output forecast bundles, use Redist instead of Regrid - - call ESMF_FieldBundleRedistStore(mirror_bundle, file_bundle, & - routehandle=rh, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - else ! not restart bundle - - ! Find the centerCoord of the moving domain - call ESMF_FieldBundleGet(mirror_bundle, fieldCount=fieldCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(fieldList(fieldCount)) - call ESMF_FieldBundleGet(mirror_bundle, fieldList=fieldList, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_FieldGet(fieldList(1), grid=grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - deallocate(fieldList) - - call ESMF_GridGetCoord(grid, coordDim=1, array=coordArray(1), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridGetCoord(grid, coordDim=2, array=coordArray(2), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_ArrayGet(coordArray(1), distgrid=coordDG, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_DistGridGet(coordDG, deCount=deCount, minIndexPTile=minIndexPTile, maxIndexPTile=maxIndexPTile, & - delayout=coordDL, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - allocate(petMap(deCount),minIndexPDe(2,deCount), maxIndexPDe(2,deCount)) - call ESMF_DELayoutGet(coordDL, petMap=petMap, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, 
msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_DistGridGet(coordDG, minIndexPDe=minIndexPDe, maxIndexPDe=maxIndexPDe, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - centerIndex(1) = (maxIndexPTile(1,1)-minIndexPTile(1,1)+1)/2 - centerIndex(2) = (maxIndexPTile(2,1)-minIndexPTile(2,1)+1)/2 - - ! write(msgString,*) "Determined centerIndex: ", centerIndex - ! call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_DEBUG, rc=rc) - ! if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - do n=1, deCount - if (minIndexPDe(1,n)<=centerIndex(1) .and. centerIndex(1)<=maxIndexPDe(1,n) .and. & - minIndexPDe(2,n)<=centerIndex(2) .and. centerIndex(2)<=maxIndexPDe(2,n)) then - ! found the DE that holds the center coordinate - rootPet = petMap(n) - if (localPet == rootPet) then - ! center DE is on local PET -> fill centerCoord locally - call ESMF_ArrayGet(coordArray(1), farrayPtr=farrayPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - centerCoord(1) = farrayPtr(centerIndex(1)-minIndexPDe(1,n)+1,centerIndex(2)-minIndexPDe(2,n)+1) - call ESMF_ArrayGet(coordArray(2), farrayPtr=farrayPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - centerCoord(2) = farrayPtr(centerIndex(1)-minIndexPDe(1,n)+1,centerIndex(2)-minIndexPDe(2,n)+1) - ! write(msgString,*) "Found centerCoord: ", centerCoord - ! call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_DEBUG, rc=rc) - ! 
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif - exit - endif - enddo - - deallocate(petMap,minIndexPDe,maxIndexPDe) - - call ESMF_VMBroadcast(vm, centerCoord, count=2, rootPet=rootPet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - write(msgString,*) "All PETs know centerCoord in radians: ", centerCoord - call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_DEBUG, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - ! determine regridmethod - if (index(fcstItemNameList(i),"_bilinear") >0 ) then - traceString = "-bilinear" - regridmethod = ESMF_REGRIDMETHOD_BILINEAR - else if (index(fcstItemNameList(i),"_patch") >0) then - traceString = "-patch" - regridmethod = ESMF_REGRIDMETHOD_PATCH - else if (index(fcstItemNameList(i),"_nearest_stod") >0) then - traceString = "-nearest_stod" - regridmethod = ESMF_REGRIDMETHOD_NEAREST_STOD - else if (index(fcstItemNameList(i),"_nearest_dtos") >0) then - traceString = "-nearest_dtos" - regridmethod = ESMF_REGRIDMETHOD_NEAREST_DTOS - else if (index(fcstItemNameList(i),"_conserve") >0) then - traceString = "-conserve" - regridmethod = ESMF_REGRIDMETHOD_CONSERVE - else - call ESMF_LogSetError(ESMF_RC_ARG_BAD, & - msg="Unable to determine regrid method.", & - line=__LINE__, file=__FILE__, rcToReturn=rc) - return - endif - srcTermProcessing = 1 ! have this fixed for bit-for-bit reproducibility - ! RegridStore() - - ! 
update output grid coordinates based of fcstgrid center lat/lon - call ESMF_FieldBundleGet(file_bundle, grid=grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridGetCoord(grid, coordDim=1, farrayPtr=lonPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridGetCoord(grid, coordDim=2, farrayPtr=latPtr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_AttributeGet(mirror_bundle, convention="NetCDF", purpose="FV3", & - name="grid_id", value=grid_id, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - if (trim(output_grid(grid_id)) == 'regional_latlon_moving' .or. & - trim(output_grid(grid_id)) == 'rotated_latlon_moving') then - n = grid_id - cen_lon(n) = centerCoord(1)*rtod - cen_lat(n) = centerCoord(2)*rtod - if (cen_lon(n) > 180.0) cen_lon(n) = cen_lon(n) - 360.0 - cen_lon(n) = NINT(cen_lon(n)*1000.0)/1000.0 - cen_lat(n) = NINT(cen_lat(n)*1000.0)/1000.0 - endif - - if (trim(output_grid(grid_id)) == 'regional_latlon_moving') then - lon1(n) = cen_lon(n) - 0.5 * (imo(n)-1) * dlon(n) - lat1(n) = cen_lat(n) - 0.5 * (jmo(n)-1) * dlat(n) - lon2(n) = cen_lon(n) + 0.5 * (imo(n)-1) * dlon(n) - lat2(n) = cen_lat(n) + 0.5 * (jmo(n)-1) * dlat(n) - do jj=lbound(lonPtr,2),ubound(lonPtr,2) - do ii=lbound(lonPtr,1),ubound(lonPtr,1) - lonPtr(ii,jj) = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (ii-1) - latPtr(ii,jj) = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (jj-1) - wrt_int_state%out_grid_info(n)%latPtr(ii,jj) = latPtr(ii,jj) - wrt_int_state%out_grid_info(n)%lonPtr(ii,jj) = lonPtr(ii,jj) - enddo - enddo - else if (trim(output_grid(grid_id)) == 'rotated_latlon_moving') then - lon1(n) = - 0.5 * (imo(n)-1) * dlon(n) - lat1(n) = - 0.5 * (jmo(n)-1) * dlat(n) - lon2(n) = 0.5 * (imo(n)-1) * dlon(n) - lat2(n) 
= 0.5 * (jmo(n)-1) * dlat(n) - do jj=lbound(lonPtr,2),ubound(lonPtr,2) - do ii=lbound(lonPtr,1),ubound(lonPtr,1) - rot_lon = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (ii-1) - rot_lat = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (jj-1) - call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) - if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 - lonPtr(ii,jj) = geo_lon - latPtr(ii,jj) = geo_lat - wrt_int_state%out_grid_info(n)%latPtr(ii,jj) = latPtr(ii,jj) - wrt_int_state%out_grid_info(n)%lonPtr(ii,jj) = lonPtr(ii,jj) - enddo - enddo - endif + do i=1, FBCount + call ESMF_StateGet(imp_state_write, itemName="output_"//trim(fcstItemNameList(i)), & + fieldbundle=file_bundle, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + do m=1, wrt_int_state%FBCount + if (trim_regridmethod_suffix(fcstItemNameList(i)) == trim_regridmethod_suffix(FBlist_outfilename(m))) then + + call ESMF_InfoGetFromHost(file_bundle, info=fcstInfo, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_InfoGetFromHost(wrt_int_state%wrtFB(m), info=wrtInfo, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_InfoUpdate(lhs=wrtInfo, rhs=fcstInfo, recursive=.true., overwrite=.true., rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! if (lprnt) call print_att_list(wrt_int_state%wrtFB(m), rc) + + end if + end do + + ! see whether a "mirror_" FieldBundle exists, i.e. dealing with moving domain that needs updated Regrid() here. + call ESMF_StateGet(imp_state_write, itemName="mirror_"//trim(fcstItemNameList(i)), & + itemType=itemType, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (itemType == ESMF_STATEITEM_FIELDBUNDLE) then + ! 
Regrid() for a moving domain + call ESMF_LogWrite("Regrid() for moving domain: mirror_"//trim(fcstItemNameList(i)), ESMF_LOGMSG_INFO, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_StateGet(imp_state_write, itemName="mirror_"//trim(fcstItemNameList(i)), & + fieldbundle=mirror_bundle, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! if (fcstItemNameList(i)(1:8) == "restart_" .or. fcstItemNameList(i)(1:18) == 'cubed_sphere_grid_') then + if (fcstItemNameList(i)(1:8) == "restart_") then + ! restart output forecast bundles, use Redist instead of Regrid + + call ESMF_FieldBundleRedistStore(mirror_bundle, file_bundle, & + routehandle=rh, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + else ! not restart bundle + + ! Find the centerCoord of the moving domain + call ESMF_FieldBundleGet(mirror_bundle, fieldCount=fieldCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + allocate(fieldList(fieldCount)) + call ESMF_FieldBundleGet(mirror_bundle, fieldList=fieldList, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_FieldGet(fieldList(1), grid=grid, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + deallocate(fieldList) + + src_grid = grid + + call ESMF_GridGetCoord(grid, coordDim=1, array=coordArray(1), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridGetCoord(grid, coordDim=2, array=coordArray(2), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_ArrayGet(coordArray(1), distgrid=coordDG, rc=rc) + if 
(ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_DistGridGet(coordDG, deCount=deCount, minIndexPTile=minIndexPTile, maxIndexPTile=maxIndexPTile, & + delayout=coordDL, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + allocate(petMap(deCount),minIndexPDe(2,deCount), maxIndexPDe(2,deCount)) + call ESMF_DELayoutGet(coordDL, petMap=petMap, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_DistGridGet(coordDG, minIndexPDe=minIndexPDe, maxIndexPDe=maxIndexPDe, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + centerIndex(1) = (maxIndexPTile(1,1)-minIndexPTile(1,1)+1)/2 + centerIndex(2) = (maxIndexPTile(2,1)-minIndexPTile(2,1)+1)/2 + + ! write(msgString,*) "Determined centerIndex: ", centerIndex + ! call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_DEBUG, rc=rc) + ! if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + do n=1, deCount + if (minIndexPDe(1,n)<=centerIndex(1) .and. centerIndex(1)<=maxIndexPDe(1,n) .and. & + minIndexPDe(2,n)<=centerIndex(2) .and. centerIndex(2)<=maxIndexPDe(2,n)) then + ! found the DE that holds the center coordinate + rootPet = petMap(n) + if (localPet == rootPet) then + ! 
center DE is on local PET -> fill centerCoord locally + call ESMF_ArrayGet(coordArray(1), farrayPtr=farrayPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + centerCoord(1) = farrayPtr(centerIndex(1)-minIndexPDe(1,n)+1,centerIndex(2)-minIndexPDe(2,n)+1) + call ESMF_ArrayGet(coordArray(2), farrayPtr=farrayPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + centerCoord(2) = farrayPtr(centerIndex(1)-minIndexPDe(1,n)+1,centerIndex(2)-minIndexPDe(2,n)+1) + ! write(msgString,*) "Found centerCoord: ", centerCoord + ! call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_DEBUG, rc=rc) + ! if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + endif + exit + endif + enddo - call ESMF_TraceRegionEnter("ESMF_FieldBundleRegridStore()"//trim(traceString), rc=rc) - call ESMF_FieldBundleRegridStore(mirror_bundle, file_bundle, & - regridMethod=regridmethod, routehandle=rh, & - unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, & - srcTermProcessing=srcTermProcessing, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_TraceRegionExit("ESMF_FieldBundleRegridStore()"//trim(traceString), rc=rc) + deallocate(petMap,minIndexPDe,maxIndexPDe) + + call ESMF_VMBroadcast(vm, centerCoord, count=2, rootPet=rootPet, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + write(msgString,*) "All PETs know centerCoord in radians: ", centerCoord + call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_DEBUG, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! 
determine regridmethod + if (index(fcstItemNameList(i),"_bilinear") >0 ) then + traceString = "-bilinear" + regridmethod = ESMF_REGRIDMETHOD_BILINEAR + else if (index(fcstItemNameList(i),"_patch") >0) then + traceString = "-patch" + regridmethod = ESMF_REGRIDMETHOD_PATCH + else if (index(fcstItemNameList(i),"_nearest_stod") >0) then + traceString = "-nearest_stod" + regridmethod = ESMF_REGRIDMETHOD_NEAREST_STOD + else if (index(fcstItemNameList(i),"_nearest_dtos") >0) then + traceString = "-nearest_dtos" + regridmethod = ESMF_REGRIDMETHOD_NEAREST_DTOS + else if (index(fcstItemNameList(i),"_conserve") >0) then + traceString = "-conserve" + regridmethod = ESMF_REGRIDMETHOD_CONSERVE + else + call ESMF_LogSetError(ESMF_RC_ARG_BAD, & + msg="Unable to determine regrid method.", & + line=__LINE__, file=__FILE__, rcToReturn=rc) + return + endif + srcTermProcessing = 1 ! have this fixed for bit-for-bit reproducibility + ! RegridStore() + + ! update output grid coordinates based of fcstgrid center lat/lon + call ESMF_FieldBundleGet(file_bundle, grid=grid, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + dst_grid = grid + call ESMF_GridGetCoord(grid, coordDim=1, farrayPtr=lonPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_GridGetCoord(grid, coordDim=2, farrayPtr=latPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_AttributeGet(mirror_bundle, convention="NetCDF", purpose="FV3", & + name="grid_id", value=grid_id, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (trim(output_grid(grid_id)) == 'regional_latlon_moving' .or. 
& + trim(output_grid(grid_id)) == 'rotated_latlon_moving') then + n = grid_id + cen_lon(n) = centerCoord(1)*rtod + cen_lat(n) = centerCoord(2)*rtod + if (cen_lon(n) > 180.0) cen_lon(n) = cen_lon(n) - 360.0 + cen_lon(n) = NINT(cen_lon(n)*1000.0)/1000.0 + cen_lat(n) = NINT(cen_lat(n)*1000.0)/1000.0 + endif - endif ! fieldbundle restart vs. not restart + if (trim(output_grid(grid_id)) == 'regional_latlon_moving') then + lon1(n) = cen_lon(n) - 0.5 * (imo(n)-1) * dlon(n) + lat1(n) = cen_lat(n) - 0.5 * (jmo(n)-1) * dlat(n) + lon2(n) = cen_lon(n) + 0.5 * (imo(n)-1) * dlon(n) + lat2(n) = cen_lat(n) + 0.5 * (jmo(n)-1) * dlat(n) + do jj=lbound(lonPtr,2),ubound(lonPtr,2) + do ii=lbound(lonPtr,1),ubound(lonPtr,1) + lonPtr(ii,jj) = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (ii-1) + latPtr(ii,jj) = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (jj-1) + wrt_int_state%out_grid_info(n)%latPtr(ii,jj) = latPtr(ii,jj) + wrt_int_state%out_grid_info(n)%lonPtr(ii,jj) = lonPtr(ii,jj) + enddo + enddo + else if (trim(output_grid(grid_id)) == 'rotated_latlon_moving') then + lon1(n) = - 0.5 * (imo(n)-1) * dlon(n) + lat1(n) = - 0.5 * (jmo(n)-1) * dlat(n) + lon2(n) = 0.5 * (imo(n)-1) * dlon(n) + lat2(n) = 0.5 * (jmo(n)-1) * dlat(n) + do jj=lbound(lonPtr,2),ubound(lonPtr,2) + do ii=lbound(lonPtr,1),ubound(lonPtr,1) + rot_lon = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (ii-1) + rot_lat = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (jj-1) + call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n))) + if (geo_lon < 0.0) geo_lon = geo_lon + 360.0 + lonPtr(ii,jj) = geo_lon + latPtr(ii,jj) = geo_lat + wrt_int_state%out_grid_info(n)%latPtr(ii,jj) = latPtr(ii,jj) + wrt_int_state%out_grid_info(n)%lonPtr(ii,jj) = lonPtr(ii,jj) + enddo + enddo + endif - ! 
Regrid() - call ESMF_TraceRegionEnter("ESMF_FieldBundleRegrid()"//trim(traceString), rc=rc) - call ESMF_FieldBundleRegrid(mirror_bundle, file_bundle, & - routehandle=rh, termorderflag=(/ESMF_TERMORDER_SRCSEQ/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_TraceRegionExit("ESMF_FieldBundleRegrid()"//trim(traceString), rc=rc) - ! RegridRelease() - call ESMF_FieldBundleRegridRelease(routehandle=rh, noGarbage=.true., rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! done - call ESMF_LogWrite("Done Regrid() for moving domain: mirror_"//trim(fcstItemNameList(i)), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif + ! create file_bundle's grid (destination grid) mask with values = dstOutsideMaskValue for all 'outside' grid points, similar as in fv3_cap + call generate_dst_field_mask(src_grid, dst_grid, dst_field_mask, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call add_dst_mask(dst_grid, dst_field_mask, dstOutsideMaskValue, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_FieldDestroy(dst_field_mask, noGarbage=.true., rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! 
reset all fields in file_bundle to missing value + call reset_bundle_to_missing_value(file_bundle, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_TraceRegionEnter("ESMF_FieldBundleRegridStore()"//trim(traceString), rc=rc) + call ESMF_FieldBundleRegridStore(mirror_bundle, file_bundle, & + dstMaskValues=(/dstOutsideMaskValue/), & + regridMethod=regridmethod, routehandle=rh, & + unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, & + srcTermProcessing=srcTermProcessing, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_TraceRegionExit("ESMF_FieldBundleRegridStore()"//trim(traceString), rc=rc) + + endif ! fieldbundle restart vs. not restart + + ! Regrid() + call ESMF_TraceRegionEnter("ESMF_FieldBundleRegrid()"//trim(traceString), rc=rc) + call ESMF_FieldBundleRegrid(mirror_bundle, file_bundle, & + zeroregion=ESMF_REGION_SELECT, & + routehandle=rh, termorderflag=(/ESMF_TERMORDER_SRCSEQ/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_TraceRegionExit("ESMF_FieldBundleRegrid()"//trim(traceString), rc=rc) + ! RegridRelease() + call ESMF_FieldBundleRegridRelease(routehandle=rh, noGarbage=.true., rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ! 
done + call ESMF_LogWrite("Done Regrid() for moving domain: mirror_"//trim(fcstItemNameList(i)), ESMF_LOGMSG_INFO, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + endif - if (fcstItemNameList(i)(1:8) /= "restart_") then - !recover fields from cartesian vector and sfc pressure - call recover_fields(file_bundle,rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - end if + if (fcstItemNameList(i)(1:8) /= "restart_") then + !recover fields from cartesian vector and sfc pressure + call recover_fields(file_bundle,rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + end if - enddo + enddo ! !----------------------------------------------------------------------- !*** do post !----------------------------------------------------------------------- - lmask_fields = .false. if( wrt_int_state%write_dopost .and. lupp_history ) then #ifdef INLINE_POST wbeg = MPI_Wtime() do n=1,ngrids if (trim(output_grid(n)) /= 'cubed_sphere_grid') then - - if (trim(output_grid(n)) == 'regional_latlon' .or. & - trim(output_grid(n)) == 'regional_latlon_moving' .or. & - trim(output_grid(n)) == 'rotated_latlon' .or. & - trim(output_grid(n)) == 'rotated_latlon_moving' .or. & - trim(output_grid(n)) == 'lambert_conformal') then - - !mask fields according to sfc pressure, only history bundles - do nbdl=1, wrt_int_state%FBCount - call ESMF_FieldBundleGet(wrt_int_state%wrtFB(nbdl), name=wrtFBName, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__,file=__FILE__)) return - - if (wrtFBName(1:8) == 'restart_') cycle - if (wrtFBName(1:18) == 'cubed_sphere_grid_') cycle - - call mask_fields(wrt_int_state%wrtFB(nbdl),rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - enddo - lmask_fields = .true. 
- endif - call post_run_fv3(wrt_int_state, n, mype, wrt_mpi_comm, lead_write_task, & itasks, jtasks, nf_hours, nf_minutes, nf_seconds) else @@ -2446,12 +2453,6 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc) trim(output_grid(grid_id)) == 'rotated_latlon_moving' .or. & trim(output_grid(grid_id)) == 'lambert_conformal') then - !mask fields according to sfc pressure - if( .not. lmask_fields ) then - call mask_fields(file_bundle,rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif - call write_netcdf(wrt_int_state%wrtFB(nbdl), trim(filename), & use_parallel_netcdf, wrt_mpi_comm, wrt_int_state%mype, & grid_id, rc=rc) @@ -2739,6 +2740,7 @@ subroutine recover_fields(file_bundle,rc) !$omp private(i,j,coslon,sinlon,sinlat) do j=jstart, jend do i=istart, iend + if (cart3dPtr3dr4(1,i,j,k) < 9.99e20) then coslon = cos(lonloc(i,j)) sinlon = sin(lonloc(i,j)) sinlat = sin(latloc(i,j)) @@ -2747,6 +2749,7 @@ subroutine recover_fields(file_bundle,rc) vwind3dr4(i,j,k) =-cart3dPtr3dr4(1,i,j,k) * sinlat*sinlon & + cart3dPtr3dr4(2,i,j,k) * sinlat*coslon & + cart3dPtr3dr4(3,i,j,k) * cos(latloc(i,j)) + endif enddo enddo enddo @@ -2770,6 +2773,7 @@ subroutine recover_fields(file_bundle,rc) !$omp private(i,j,k,coslon,sinlon,sinlat) do j=jstart, jend do i=istart, iend + if (cart3dPtr2dr4(1,i,j) < 9.99e20) then coslon = cos(lonloc(i,j)) sinlon = sin(lonloc(i,j)) sinlat = sin(latloc(i,j)) @@ -2778,6 +2782,7 @@ subroutine recover_fields(file_bundle,rc) vwind2dr4(i,j) =-cart3dPtr2dr4(1,i,j) * sinlat*sinlon & + cart3dPtr2dr4(2,i,j) * sinlat*coslon & + cart3dPtr2dr4(3,i,j) * cos(latloc(i,j)) + endif enddo enddo endif @@ -2795,7 +2800,7 @@ subroutine recover_fields(file_bundle,rc) !$omp parallel do default(none) shared(pressfc,jstart,jend,istart,iend) private(i,j) do j=jstart, jend do i=istart, iend - pressfc(i,j) = pressfc(i,j)**(grav/(rdgas*stndrd_atmos_lapse))*stndrd_atmos_ps + if (pressfc(i,j) < 9.99e20) 
pressfc(i,j) = pressfc(i,j)**(grav/(rdgas*stndrd_atmos_lapse))*stndrd_atmos_ps enddo enddo endif @@ -2809,263 +2814,6 @@ subroutine recover_fields(file_bundle,rc) end subroutine recover_fields ! !----------------------------------------------------------------------- -! - subroutine mask_fields(file_bundle,rc) - - type(ESMF_FieldBundle), intent(in) :: file_bundle - integer, intent(out), optional :: rc -! - integer i,j,k,ifld,fieldCount,fieldDimCount,gridDimCount - integer istart,iend,jstart,jend,kstart,kend - type(ESMF_Grid) fieldGrid - type(ESMF_TypeKind_Flag) typekind - type(ESMF_TypeKind_Flag) attTypeKind - character(len=ESMF_MAXSTR) fieldName - type(ESMF_Field), allocatable :: fcstField(:) - real(ESMF_KIND_R4), dimension(:,:), pointer :: var2dPtr2dr4 - real(ESMF_KIND_R4), dimension(:,:,:), pointer :: var3dPtr3dr4 - real(ESMF_KIND_R4), dimension(:,:,:), pointer :: vect3dPtr2dr4 - real(ESMF_KIND_R4), dimension(:,:,:,:), pointer :: vect4dPtr3dr4 - real(ESMF_KIND_R4), dimension(:,:), allocatable :: maskwrt - - logical :: mvispresent=.false. - real(ESMF_KIND_R4) :: missing_value_r4=-1.e+10 - real(ESMF_KIND_R8) :: missing_value_r8=9.99e20 - character(len=ESMF_MAXSTR) :: msg - - call ESMF_LogWrite("call mask field on wrt comp",ESMF_LOGMSG_INFO,rc=RC) - -! get fieldCount - call ESMF_FieldBundleGet(file_bundle, fieldCount=fieldCount, & - grid=fieldGrid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return ! bail out -! get gridDimCount - call ESMF_GridGet(fieldgrid, dimCount=gridDimCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return ! bail out - - allocate(fcstField(fieldCount)) - call ESMF_LogWrite("call mask field get fcstField",ESMF_LOGMSG_INFO,rc=RC) - call ESMF_FieldBundleGet(file_bundle, fieldList=fcstField, itemorderflag=ESMF_ITEMORDER_ADDORDER, rc=rc) - -! 
generate the maskwrt according to surface pressure - do ifld=1,fieldCount - !call ESMF_LogWrite("call mask field get fieldname, type dimcount",ESMF_LOGMSG_INFO,rc=RC) - call ESMF_FieldGet(fcstField(ifld),name=fieldName,typekind=typekind,dimCount=fieldDimCount, rc=rc) - !write(msg,*) 'fieldName,typekind,fieldDimCount=',trim(fieldName),typekind,fieldDimCount - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - if (.not. allocated(maskwrt)) then - if ( typekind == ESMF_TYPEKIND_R4 .and. fieldDimCount == gridDimCount) then - call ESMF_FieldGet(fcstField(ifld),localDe=0, farrayPtr=var2dPtr2dr4, rc=rc) - istart = lbound(var2dPtr2dr4,1) - iend = ubound(var2dPtr2dr4,1) - jstart = lbound(var2dPtr2dr4,2) - jend = ubound(var2dPtr2dr4,2) - allocate(maskwrt(istart:iend,jstart:jend)) - maskwrt(istart:iend,jstart:jend)=1.0 - endif - endif - if(index(trim(fieldName),"pressfc")>0) then - call ESMF_FieldGet(fcstField(ifld),localDe=0, farrayPtr=var2dPtr2dr4, rc=rc) - istart = lbound(var2dPtr2dr4,1) - iend = ubound(var2dPtr2dr4,1) - jstart = lbound(var2dPtr2dr4,2) - jend = ubound(var2dPtr2dr4,2) - if (.not. allocated(maskwrt)) then - allocate(maskwrt(istart:iend,jstart:jend)) - maskwrt(istart:iend,jstart:jend)=1.0 - endif -!$omp parallel do default(shared) private(i,j) - do j=jstart, jend - do i=istart, iend - if(abs(var2dPtr2dr4(i,j)-0.) < 1.0e-6) maskwrt(i,j)=0. - enddo - enddo - call ESMF_LogWrite("call mask field pressfc found, maskwrt generated",ESMF_LOGMSG_INFO,rc=RC) - exit - endif - enddo - -! loop to mask all fields according to maskwrt - do ifld=1,fieldCount - !call ESMF_LogWrite("call mask field get fieldname, type dimcount",ESMF_LOGMSG_INFO,rc=RC) - call ESMF_FieldGet(fcstField(ifld),name=fieldName,typekind=typekind,dimCount=fieldDimCount, rc=rc) - !write(msg,*) 'fieldName,typekind,fieldDimCount=',trim(fieldName),typekind,fieldDimCount - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - ! 
For vector fields - if(index(trim(fieldName),"vector")>0) then - ! Only work on ESMF_TYPEKIND_R4 fields for now - if ( typekind == ESMF_TYPEKIND_R4 ) then - ! 3-d vector fields with 4-d arrays - if( fieldDimCount > gridDimCount+1 ) then - !call ESMF_LogWrite("call mask field get vector 3d farray",ESMF_LOGMSG_INFO,rc=RC) - call ESMF_FieldGet(fcstField(ifld), localDe=0, farrayPtr=vect4dPtr3dr4, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return ! bail out - if( ubound(vect4dPtr3dr4,1)-lbound(vect4dPtr3dr4,1)+1/=3 ) then - rc=991 - write(0,*) 'ERROR, 3D the vector dimension /= 3, rc=',rc - call ESMF_Finalize(endflag=ESMF_END_ABORT) - endif - ! Get the _FillValue from the field attribute if exists - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", typekind=attTypeKind, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,attTypeKind,isPresent=',trim(fieldName),attTypeKind,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - if ( mvispresent ) then - if (attTypeKind==ESMF_TYPEKIND_R4) then - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", value=missing_value_r4, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,_FillValue,isPresent=',trim(fieldName),missing_value_r4,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - else if (attTypeKind==ESMF_TYPEKIND_R8) then - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", value=missing_value_r8, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,_FillValue,isPresent=',trim(fieldName),missing_value_r8,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - endif - istart = lbound(vect4dPtr3dr4,2) - iend = ubound(vect4dPtr3dr4,2) - jstart = lbound(vect4dPtr3dr4,3) - jend = ubound(vect4dPtr3dr4,3) 
- kstart = lbound(vect4dPtr3dr4,4) - kend = ubound(vect4dPtr3dr4,4) -!$omp parallel do default(shared) private(i,j,k) - do k=kstart,kend - do j=jstart, jend - do i=istart, iend - if (maskwrt(i,j)<1.0 .and. attTypeKind==ESMF_TYPEKIND_R4) vect4dPtr3dr4(:,i,j,k)=missing_value_r4 - if (maskwrt(i,j)<1.0 .and. attTypeKind==ESMF_TYPEKIND_R8) vect4dPtr3dr4(:,i,j,k)=missing_value_r8 - enddo - enddo - enddo - endif !mvispresent - ! 2-d vector fields with 3-d arrays - else - call ESMF_FieldGet(fcstField(ifld), localDe=0, farrayPtr=vect3dPtr2dr4, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return ! bail out - if( ubound(vect3dPtr2dr4,1)-lbound(vect3dPtr2dr4,1)+1 /= 3 ) then - rc=991 - write(0,*) 'ERROR, 2D the vector dimension /= 3, rc=',rc - call ESMF_Finalize(endflag=ESMF_END_ABORT) - endif - ! Get the _FillValue from the field attribute if exists - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", typekind=attTypeKind, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,attTypeKind,isPresent=',trim(fieldName),attTypeKind,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - if ( mvispresent ) then - if (attTypeKind==ESMF_TYPEKIND_R4) then - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", value=missing_value_r4, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,_FillValue,isPresent=',trim(fieldName),missing_value_r4,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - else if (attTypeKind==ESMF_TYPEKIND_R8) then - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", value=missing_value_r8, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,_FillValue,isPresent=',trim(fieldName),missing_value_r8,mvispresent - !call ESMF_LogWrite("call mask field: 
"//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - endif - istart = lbound(vect3dPtr2dr4,2) - iend = ubound(vect3dPtr2dr4,2) - jstart = lbound(vect3dPtr2dr4,3) - jend = ubound(vect3dPtr2dr4,3) -!$omp parallel do default(shared) private(i,j) - do j=jstart, jend - do i=istart, iend - if (maskwrt(i,j)<1.0 .and. attTypeKind==ESMF_TYPEKIND_R4) vect3dPtr2dr4(:,i,j)=missing_value_r4 - if (maskwrt(i,j)<1.0 .and. attTypeKind==ESMF_TYPEKIND_R8) vect3dPtr2dr4(:,i,j)=missing_value_r8 - enddo - enddo - endif !mvispresent - endif - endif -! For non-vector fields - else - ! Only work on ESMF_TYPEKIND_R4 fields for now - if ( typekind == ESMF_TYPEKIND_R4 ) then - ! 2-d fields - if(fieldDimCount == gridDimCount) then - call ESMF_FieldGet(fcstField(ifld),localDe=0, farrayPtr=var2dPtr2dr4, rc=rc) - ! Get the _FillValue from the field attribute if exists - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", typekind=attTypeKind, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,attTypeKind,isPresent=',trim(fieldName),attTypeKind,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - if ( mvispresent ) then - if (attTypeKind==ESMF_TYPEKIND_R4) then - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", value=missing_value_r4, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,_FillValue,isPresent=',trim(fieldName),missing_value_r4,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - else if (attTypeKind==ESMF_TYPEKIND_R8) then - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", value=missing_value_r8, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,_FillValue,isPresent=',trim(fieldName),missing_value_r8,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - endif - istart = lbound(var2dPtr2dr4,1) - iend = ubound(var2dPtr2dr4,1) 
- jstart = lbound(var2dPtr2dr4,2) - jend = ubound(var2dPtr2dr4,2) -!$omp parallel do default(shared) private(i,j) - do j=jstart, jend - do i=istart, iend - if (maskwrt(i,j)<1.0 .and. attTypeKind==ESMF_TYPEKIND_R4) var2dPtr2dr4(i,j)=missing_value_r4 - if (maskwrt(i,j)<1.0 .and. attTypeKind==ESMF_TYPEKIND_R8) var2dPtr2dr4(i,j)=missing_value_r8 - enddo - enddo - endif !mvispresent - ! 3-d fields - else if(fieldDimCount == gridDimCount+1) then - call ESMF_FieldGet(fcstField(ifld),localDe=0, farrayPtr=var3dPtr3dr4, rc=rc) - ! Get the _FillValue from the field attribute if exists - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", typekind=attTypeKind, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,attTypeKind,isPresent=',trim(fieldName),attTypeKind,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - if ( mvispresent ) then - if (attTypeKind==ESMF_TYPEKIND_R4) then - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", value=missing_value_r4, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,_FillValue,isPresent=',trim(fieldName),missing_value_r4,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - else if (attTypeKind==ESMF_TYPEKIND_R8) then - call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", & - name="_FillValue", value=missing_value_r8, isPresent=mvispresent, rc=rc) - !write(msg,*) 'fieldName,_FillValue,isPresent=',trim(fieldName),missing_value_r8,mvispresent - !call ESMF_LogWrite("call mask field: "//trim(msg),ESMF_LOGMSG_INFO,rc=RC) - endif - istart = lbound(var3dPtr3dr4,1) - iend = ubound(var3dPtr3dr4,1) - jstart = lbound(var3dPtr3dr4,2) - jend = ubound(var3dPtr3dr4,2) - kstart = lbound(var3dPtr3dr4,3) - kend = ubound(var3dPtr3dr4,3) -!$omp parallel do default(shared) private(i,j,k) - do k=kstart,kend - do j=jstart, jend - do i=istart, iend - if 
(maskwrt(i,j)<1.0 .and. attTypeKind==ESMF_TYPEKIND_R4) var3dPtr3dr4(i,j,k)=missing_value_r4 - if (maskwrt(i,j)<1.0 .and. attTypeKind==ESMF_TYPEKIND_R8) var3dPtr3dr4(i,j,k)=missing_value_r8 - enddo - enddo - enddo - endif !mvispresent - endif - endif - endif - enddo -! - deallocate(maskwrt) - deallocate(fcstField) - rc = 0 - - end subroutine mask_fields -! -!----------------------------------------------------------------------- ! #ifdef USE_ESMF_IO subroutine ESMFproto_FieldBundleWrite(fieldbundle, fileName, & @@ -4774,6 +4522,211 @@ subroutine compute_fields_checksum(bundle, rc) end subroutine compute_fields_checksum + subroutine reset_bundle_to_missing_value(file_bundle,rc) + + type(ESMF_FieldBundle), intent(in) :: file_bundle + integer, intent(out), optional :: rc + + integer ifld,fieldCount + type(ESMF_Field), allocatable :: fcstField(:) + + ! get field count + call ESMF_FieldBundleGet(file_bundle, fieldCount=fieldCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (fieldCount == 0) return + + allocate(fcstField(fieldCount)) + call ESMF_FieldBundleGet(file_bundle, fieldList=fcstField, itemorderflag=ESMF_ITEMORDER_ADDORDER, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + do ifld=1,fieldCount + + call init_field_to_missing_value(fcstField(ifld), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + enddo + + deallocate(fcstField) + + rc = 0 + + end subroutine reset_bundle_to_missing_value + + subroutine init_field_to_missing_value(field, rc) + + type(ESMF_Field), intent(inout) :: field + integer, intent(out), optional :: rc + + integer dimCount, rank + type(ESMF_TypeKind_Flag) typekind + character(len=ESMF_MAXSTR) fieldName + real(ESMF_KIND_R4), dimension(:,:), pointer :: ptr2d_r4 + real(ESMF_KIND_R4), dimension(:,:,:), pointer :: ptr3d_r4 + real(ESMF_KIND_R4), 
dimension(:,:,:,:), pointer :: ptr4d_r4 + real(ESMF_KIND_R8), dimension(:,:), pointer :: ptr2d_r8 + real(ESMF_KIND_R8), dimension(:,:,:), pointer :: ptr3d_r8 + real(ESMF_KIND_R8), dimension(:,:,:,:), pointer :: ptr4d_r8 + + real(ESMF_KIND_R4) :: missing_value_r4=9.99e20 + real(ESMF_KIND_R8) :: missing_value_r8=9.99e20 + + call ESMF_FieldGet(field, name=fieldName, typekind=typekind, dimCount=dimCount, rank=rank, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! bail out + + ! Only work on ESMF_TYPEKIND_R4 fields for now + if (typekind == ESMF_TYPEKIND_R4) then + if (dimCount == 2) then + call ESMF_FieldGet(field, localDe=0, farrayPtr=ptr2d_r4, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! bail out + ptr2d_r4 = missing_value_r4 + else if (dimCount == 3) then + call ESMF_FieldGet(field, localDe=0, farrayPtr=ptr3d_r4, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! bail out + ptr3d_r4 = missing_value_r4 + else if (dimCount == 4) then + call ESMF_FieldGet(field, localDe=0, farrayPtr=ptr4d_r4, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! bail out + ptr4d_r4 = missing_value_r4 + else + write(0,*)' Unsupported dimCount = ', dimCount + stop + endif + else if (typekind == ESMF_TYPEKIND_R8) then + if (dimCount == 2) then + call ESMF_FieldGet(field, localDe=0, farrayPtr=ptr2d_r8, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! bail out + ptr2d_r8 = missing_value_r8 + else if (dimCount == 3) then + call ESMF_FieldGet(field, localDe=0, farrayPtr=ptr3d_r8, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! 
bail out + ptr3d_r8 = missing_value_r8 + else if (dimCount == 4) then + call ESMF_FieldGet(field, localDe=0, farrayPtr=ptr4d_r8, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! bail out + ptr4d_r8 = missing_value_r8 + else + write(0,*)' Unsupported dimCount = ', dimCount + stop + endif + else + write(0,*)' Unsupported typekind = ', typekind + stop + endif + + rc = 0 + + end subroutine init_field_to_missing_value + + subroutine generate_dst_field_mask(src_grid, dst_grid, dst_field, rc) + + type(ESMF_Grid), intent(in) :: src_grid, dst_grid + type(ESMF_Field), intent(inout):: dst_field + integer, intent(out) :: rc + + type(ESMF_Field) :: src_field + real(ESMF_KIND_R4), pointer :: ptr(:,:) + integer(ESMF_KIND_I4), pointer :: maskPtr(:,:) + integer(ESMF_KIND_I4), pointer :: ptr_dst_status(:,:) + type(ESMF_RouteHandle) :: routehandle_mask + character(ESMF_MAXSTR) :: itemName + integer :: localDeCount + integer :: ig,jg, istart,iend, jstart,jend + integer :: srcTermProcessing + + + src_field = ESMF_FieldCreate(src_grid, & + typekind=ESMF_TYPEKIND_R4, & + staggerloc=ESMF_STAGGERLOC_CENTER, & + rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_FieldGet(src_field, localDeCount=localDeCount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (localDeCount > 0) then + call ESMF_FieldGet(src_field, farrayPtr=ptr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ptr = 1.0 + end if + + + dst_field = ESMF_FieldCreate(dst_grid, & + typekind=ESMF_TYPEKIND_R4, & + staggerloc=ESMF_STAGGERLOC_CENTER, & + rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_FieldGet(dst_field, localDeCount=localDeCount, rc=rc); + if (ESMF_LogFoundError(rcToCheck=rc, 
msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (localDeCount > 0) then + call ESMF_FieldGet(dst_field, farrayPtr=ptr, rc=rc); + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + ptr = 0.0 + end if + + srcTermProcessing = 0 + + call ESMF_FieldRegridStore(srcField=src_field, & + dstField=dst_field, & + regridmethod=ESMF_REGRIDMETHOD_BILINEAR, & + routehandle=routehandle_mask, & + unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, & + srcTermProcessing=srcTermProcessing, & + rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_FieldRegrid(src_field, dst_field, & + routehandle=routehandle_mask, & + termorderflag=ESMF_TERMORDER_SRCSEQ, & + zeroregion=ESMF_REGION_SELECT, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_RouteHandleDestroy(routehandle_mask, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_FieldDestroy(src_field, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + rc = 0 + end subroutine generate_dst_field_mask + + subroutine add_dst_mask(dst_grid, dst_field, dstOutsideMaskValue, rc) + + type(ESMF_Grid), intent(inout) :: dst_grid + type(ESMF_Field), intent(in) :: dst_field + integer, intent(in) :: dstOutsideMaskValue + integer, intent(out) :: rc + + real(ESMF_KIND_R4), pointer :: ptr(:,:) + integer(ESMF_KIND_I4), pointer :: maskPtr(:,:) + integer :: localDeCount + + + call ESMF_FieldGet(dst_field, localDeCount=localDeCount, rc=rc); + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! Set destination grid mask to dstOutsideMaskValue where dst_field is 0 which will be for all destination + ! 
points outside of source (forecast, computational) grid + if (localDeCount > 0) then + call ESMF_FieldGet(dst_field, farrayPtr=ptr, rc=rc); + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_GridGetItem(dst_grid, itemflag=ESMF_GRIDITEM_MASK, & + staggerloc=ESMF_STAGGERLOC_CENTER, farrayPtr=maskPtr, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + maskPtr = 0 + where (ptr == 0.0) + maskPtr = dstOutsideMaskValue + endwhere + end if + + rc = 0 + + end subroutine add_dst_mask + end module module_wrt_grid_comp ! !----------------------------------------------------------------------- diff --git a/fv3/io/post_fv3.F90 b/io/post_fv3.F90 similarity index 98% rename from fv3/io/post_fv3.F90 rename to io/post_fv3.F90 index 6fa5e47a2f..05ed62e7a9 100644 --- a/fv3/io/post_fv3.F90 +++ b/io/post_fv3.F90 @@ -46,6 +46,12 @@ subroutine post_run_fv3(wrt_int_state,grid_id,mype,mpicomp,lead_write, & lonstart,lonlast use grib2_module, only : gribit2,num_pset,nrecout,first_grbtbl use xml_perl_data,only : paramset + use read_xml_upp_mod, only : read_xml + use set_outflds_upp_mod, only : set_outflds + use get_postfilename_mod, only : get_postfilename + use process_upp_mod, only : process + use post_nems_routines, only : read_postnmlt, post_alctvars, & + post_finalize ! !----------------------------------------------------------------------- ! 
@@ -542,7 +548,7 @@ subroutine set_postvars_fv3(wrt_int_state,grid_id,mype,mpicomp) no3cb, nh4cb, dusmass, ducmass, dusmass25,ducmass25, & snownc, graupelnc, qrmax, hail_maxhailcast, & smoke_ave,dust_ave,coarsepm_ave,swddif,swddni, & - xlaixy,wspd10umax,wspd10vmax + xlaixy,wspd10umax,wspd10vmax,f10m use soil, only: sldpth, sh2o, smc, stc, sllevel use masks, only: lmv, lmh, htm, vtm, gdlat, gdlon, dx, dy, hbm2, sm, sice use ctlblk_mod, only: im, jm, lm, lp1, jsta, jend, jsta_2l, jend_2u, jsta_m,jend_m, & @@ -554,7 +560,7 @@ subroutine set_postvars_fv3(wrt_int_state,grid_id,mype,mpicomp) alsl, spl, ihrst, modelname, nsoil, rdaod, gocart_on, & gccpp_on, nasa_on, d2d_chem, nbin_ss, nbin_bc, nbin_oc,& nbin_du,nbin_su, nbin_no3, nbin_nh4 - use params_mod, only: erad, dtr, capa, p1000, small,h1, d608, pi, rd + use params_mod, only: erad, dtr, capa, p1000, small,h1, d608, pi, rd, rtd use gridspec_mod,only: latstart, latlast, lonstart, lonlast, cenlon, cenlat, & dxval, dyval, truelat2, truelat1, psmapf, cenlat, & lonstartv, lonlastv, cenlonv, latstartv, latlastv, & @@ -565,6 +571,9 @@ subroutine set_postvars_fv3(wrt_int_state,grid_id,mype,mpicomp) use physcons, only: grav => con_g, fv => con_fvirt, rgas => con_rd, & eps => con_eps, epsm1 => con_epsm1 use rqstfld_mod + use exch_upp_mod, only : exch + use table_upp_mod, only : table + use tableq_upp_mod, only : tableq ! ! use write_internal_state, only: wrt_internal_state ! 
@@ -584,12 +593,13 @@ subroutine set_postvars_fv3(wrt_int_state,grid_id,mype,mpicomp) integer i, ip1, j, l, k, n, iret, ibdl, rc, kstart, kend integer i1,i2,j1,j2,k1,k2 integer fieldDimCount,gridDimCount,ncount_field,bundle_grid_id - integer jdate(8) + integer jdate(8), jdn logical foundland, foundice, found, mvispresent integer totalLBound3d(3), totalUBound3d(3) real(4) rinc(5), fillvalue real(8) fillvalue8 real tlmh,RADI,TMP,ES,TV,RHOAIR,tem,tstart,dtp + real sun_zenith, sun_azimuth real, dimension(:),allocatable :: ak5, bk5 real(ESMF_KIND_R4),dimension(:,:),pointer :: arrayr42d real(ESMF_KIND_R8),dimension(:,:),pointer :: arrayr82d @@ -597,6 +607,7 @@ subroutine set_postvars_fv3(wrt_int_state,grid_id,mype,mpicomp) real(ESMF_KIND_R8),dimension(:,:,:),pointer :: arrayr83d real,dimension(:), allocatable :: slat,qstl real,external::FPVSNEW + real,external::iw3jdn real,dimension(:,:),allocatable :: dummy, p2d, t2d, q2d, qs2d, & cw2d, cfr2d, snacc_land, snacc_ice, & acsnom_land, acsnom_ice @@ -1115,15 +1126,25 @@ subroutine set_postvars_fv3(wrt_int_state,grid_id,mype,mpicomp) enddo enddo endif + + ! surface specific humidity + if(trim(fieldname)=='qs') then + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,arrayr42d,qs,fillValue,spval) + do j=jsta,jend + do i=ista, iend + qs(i,j) = arrayr42d(i,j) + if(abs(arrayr42d(i,j)-fillValue) < small) qs(i,j)=spval + enddo + enddo + endif ! 
foundation temperature if(trim(fieldname)=='tref') then - !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,fdnsst) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,fdnsst,fillValue) do j=jsta,jend do i=ista, iend - if (arrayr42d(i,j) /= spval) then - fdnsst(i,j) = arrayr42d(i,j) - endif + fdnsst(i,j) = arrayr42d(i,j) + if (abs(arrayr42d(i,j)-fillValue) < small) fdnsst(i,j)=spval enddo enddo endif @@ -2383,6 +2404,17 @@ subroutine set_postvars_fv3(wrt_int_state,grid_id,mype,mpicomp) enddo endif + ! f10m + if(trim(fieldname)=='f10m') then + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,f10m,arrayr42d,v10h,spval,fillValue) + do j=jsta,jend + do i=ista, iend + f10m(i,j) = arrayr42d(i,j) + if( abs(arrayr42d(i,j)-fillValue) < small) f10m(i,j) = spval + enddo + enddo + endif + ! vegetation type if(trim(fieldname)=='vtype') then !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,ivgtyp,fillValue) @@ -4416,6 +4448,29 @@ subroutine set_postvars_fv3(wrt_int_state,grid_id,mype,mpicomp) ! end file_loop_all enddo file_loop_all +! calculate cos(SZA) + call w3fs13(idat(3),idat(1),idat(2),jdn) +!$omp parallel do default(none) private(i,j,sun_zenith,sun_azimuth) shared(jsta,jend,ista,iend,czen,czmean,gdlat,gdlon,jdn,idat) + do j=jsta,jend + do i=ista,iend + call zensun(jdn,float(idat(4)),gdlat(i,j),gdlon(i,j),pi,sun_zenith,sun_azimuth) + czen(i,j) = cos(sun_zenith/rtd) + czmean(i,j) = czen(i,j) + enddo + enddo + +! if u10/v10 are missing, derive them from f10m and surface wind +!$omp parallel do default(none) private(i,j) shared(jsta,jend,lm,spval,ista,iend,u10,v10,f10m,uh,vh) + do j=jsta,jend + do i=ista,iend + if(u10(i,j) == spval .and. v10(i,j) == spval .and. & + f10m(i,j) /=spval .and. uh(i,j,lm)/=spval .and. vh(i,j,lm)/=spval) then + u10(i,j) = f10m(i,j) * uh(i,j,lm) + v10(i,j) = f10m(i,j) * vh(i,j,lm) + endif + enddo + enddo + ! 
recompute full layer of zint !$omp parallel do default(none) private(i,j) shared(jsta,jend,lp1,spval,zint,fis,ista,iend) do j=jsta,jend diff --git a/fv3/io/post_nems_routines.F90 b/io/post_nems_routines.F90 similarity index 98% rename from fv3/io/post_nems_routines.F90 rename to io/post_nems_routines.F90 index 337139b1de..8775580154 100644 --- a/fv3/io/post_nems_routines.F90 +++ b/io/post_nems_routines.F90 @@ -1,3 +1,7 @@ +module post_nems_routines + implicit none + contains + !----------------------------------------------------------------------- !%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% !----------------------------------------------------------------------- @@ -28,6 +32,7 @@ subroutine post_alctvars(imi,jmi,lmi,mype,nwtlpes,lead_write, mpicomp, & ileft,iright,ileftb,irightb, & icnt2, idsp2,isxa,iexa,jsxa,jexa, & num_procs + use allocate_all_upp_mod, only: allocate_all ! !----------------------------------------------------------------------- ! @@ -346,6 +351,7 @@ subroutine post_finalize(post_gribversion) ! Jul 2019 Jun Wang: finalize post step ! use grib2_module, only : grib_info_finalize + use DE_ALLOCATE_UPP_MOD , only : de_allocate ! character(*),intent(in) :: post_gribversion ! @@ -357,3 +363,5 @@ subroutine post_finalize(post_gribversion) ! 
end subroutine post_finalize +end module post_nems_routines + diff --git a/mpas/CMakeLists.txt b/mpas/CMakeLists.txt new file mode 100644 index 0000000000..8954cf7740 --- /dev/null +++ b/mpas/CMakeLists.txt @@ -0,0 +1,128 @@ +cmake_minimum_required(VERSION 3.19) + +project(MPAS + VERSION 1.0.0 + LANGUAGES Fortran) + +include(${CMAKE_CURRENT_SOURCE_DIR}/MPAS-Model/cmake/Functions/MPAS_Functions.cmake) + +list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules) +set(CMAKE_DIRECTORY_LABELS ${PROJECT_NAME}) +include(GNUInstallDirs) + +# Build product output locations +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) + +# Set default build type to RelWithDebInfo +if(NOT CMAKE_BUILD_TYPE) + message(STATUS "Setting default build type to Release. Specify CMAKE_BUILD_TYPE to override.") + set(CMAKE_BUILD_TYPE "Release" CACHE STRING "CMake Build type" FORCE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") +endif() + +# Find C pre-processor +if(CMAKE_C_COMPILER_ID MATCHES GNU) + find_program(CPP_EXECUTABLE NAMES cpp REQUIRED) + set(CPP_EXTRA_FLAGS -traditional) +elseif(CMAKE_C_COMPILER_ID MATCHES "(Apple)?Clang" ) + find_program(CPP_EXECUTABLE NAMES cpp REQUIRED) +else() + message(STATUS "Unknown compiler: ${CMAKE_C_COMPILER_ID}") + set(CPP_EXECUTABLE ${CMAKE_C_COMPILER}) +endif() + +# Fortran module output directory for build interface +set(MPAS_MODULE_DIR ${PROJECT_NAME}/module/${CMAKE_Fortran_COMPILER_ID}/${CMAKE_Fortran_COMPILER_VERSION}) + +# Install Fortran module directory +install(DIRECTORY ${CMAKE_BINARY_DIR}/${MPAS_MODULE_DIR}/ DESTINATION ${CMAKE_INSTALL_LIBDIR}/${MPAS_MODULE_DIR}/) + +############################################################################### +# Build MPAS libraries... 
+############################################################################### + +# MPAS Utilities (Externals) +add_subdirectory(MPAS-Model/src/external/ezxml) + +# ESMF libraries. +if(NOT ESMF_FOUND) + find_package(ESMF REQUIRED) +endif() +add_definitions(-DMPAS_EXTERNAL_ESMF_LIB -DMPAS_NO_ESMF_INIT) +add_library(${PROJECT_NAME}::external::esmf ALIAS esmf) + +# MPAS Namelist +add_subdirectory(MPAS-Model/src/tools/input_gen) # Targets: namelist_gen, streams_gen + +# MPAS Registry +add_subdirectory(MPAS-Model/src/tools/registry) # Targets: mpas_parse_ + +# MPAS framework +add_subdirectory(MPAS-Model/src/framework) # Target: MPAS::framework + +# MPAS operators +add_subdirectory(MPAS-Model/src/operators) # Target: MPAS::operators + +# MPAS atmosphere +add_subdirectory(MPAS-Model/src/core_atmosphere) # Target: core_atmosphere +add_library(mpas ALIAS core_atmosphere) + +############################################################################### +# Package Configurations +############################################################################### +include(CMakePackageConfigHelpers) + +# Build-tree target exports +export(EXPORT ${PROJECT_NAME}ExportsExternal NAMESPACE ${PROJECT_NAME}::external:: FILE ${PROJECT_NAME}-targets-external.cmake) +export(EXPORT ${PROJECT_NAME}Exports NAMESPACE ${PROJECT_NAME}:: FILE ${PROJECT_NAME}-targets.cmake) +export(EXPORT ${PROJECT_NAME}ExportsCore NAMESPACE ${PROJECT_NAME}::core:: FILE ${PROJECT_NAME}-targets-core.cmake) + +# CMake Config file install location +set(CONFIG_INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules DESTINATION ${CONFIG_INSTALL_DESTINATION}) + +#### +set(BINDIR ${CMAKE_BINARY_DIR}/bin) +set(CORE_DATADIR_ROOT ${CMAKE_BINARY_DIR}/${PROJECT_NAME}) +set(CMAKE_MODULE_INSTALL_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules) +string(TOLOWER ${PROJECT_NAME} PROJECT_NAME_LOWER) +configure_package_config_file( + 
MPAS-Model/cmake/PackageConfig.cmake.in ${PROJECT_NAME_LOWER}-config.cmake + INSTALL_DESTINATION . + INSTALL_PREFIX ${CMAKE_CURRENT_BINARY_DIR} + PATH_VARS BINDIR CORE_DATADIR_ROOT CMAKE_MODULE_INSTALL_PATH) + +### +set(BINDIR ${CMAKE_INSTALL_BINDIR}) +set(CORE_DATADIR_ROOT ${CMAKE_INSTALL_DATADIR}/${PROJECT_NAME}) +set(CMAKE_MODULE_INSTALL_PATH ${CONFIG_INSTALL_DESTINATION}/Modules) +configure_package_config_file( + MPAS-Model/cmake/PackageConfig.cmake.in install/${PROJECT_NAME_LOWER}-config.cmake + INSTALL_DESTINATION ${CONFIG_INSTALL_DESTINATION} + PATH_VARS BINDIR CORE_DATADIR_ROOT CMAKE_MODULE_INSTALL_PATH) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/install/${PROJECT_NAME_LOWER}-config.cmake + DESTINATION ${CONFIG_INSTALL_DESTINATION}) + +### +write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config-version.cmake + VERSION ${PROJECT_VERSION} + COMPATIBILITY AnyNewerVersion) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config-version.cmake + DESTINATION ${CONFIG_INSTALL_DESTINATION}) + +### +install(EXPORT ${PROJECT_NAME}ExportsExternal + NAMESPACE ${PROJECT_NAME}::external:: + FILE ${PROJECT_NAME_LOWER}-targets-external.cmake + DESTINATION ${CONFIG_INSTALL_DESTINATION}) +install(EXPORT ${PROJECT_NAME}Exports + NAMESPACE ${PROJECT_NAME}:: + FILE ${PROJECT_NAME_LOWER}-targets.cmake + DESTINATION ${CONFIG_INSTALL_DESTINATION}) +install(EXPORT ${PROJECT_NAME}ExportsCore + NAMESPACE ${PROJECT_NAME}::core:: + FILE ${PROJECT_NAME_LOWER}-targets-core.cmake + DESTINATION ${CONFIG_INSTALL_DESTINATION}) diff --git a/mpas/MPAS-Model b/mpas/MPAS-Model new file mode 160000 index 0000000000..38d2177aef --- /dev/null +++ b/mpas/MPAS-Model @@ -0,0 +1 @@ +Subproject commit 38d2177aef842a5c6abe26ffe876804b95fd9e0a diff --git a/mpas/atmos_coupling.F90 b/mpas/atmos_coupling.F90 new file mode 100644 index 0000000000..cd18a22844 --- /dev/null +++ b/mpas/atmos_coupling.F90 @@ -0,0 +1,463 @@ +! 
########################################################################################### +!> \file atmos_coupling.F90 +!> Procedures for coupling the MPAS dynamical core to the CCPP Physics. +!> +! ########################################################################################### +module atmos_coupling_mod + use mpas_kind_types, only : mpas_kind => RKIND + use ufs_mpas_subdriver, only : domain_ptr + + implicit none + public :: MPAS_statein_type + public :: MPAS_stateout_type + public :: ufs_mpas_to_physics + public :: ufs_physics_to_mpas + + ! Indices for MPAS domain deceomposition on each task. + integer, dimension(:), pointer :: indicesGlobal + + !> ####################################################################################### + !> MPAS_statein_type + !> + !> Fields needed by the MPAS dynamical core for forward integration. + !> + !> ####################################################################################### + type MPAS_statein_type + ! Dimensions + integer, pointer :: nCells ! Number of cells, including halo cells + integer, pointer :: nEdges ! Number of edges, including halo edges + integer, pointer :: nVertices ! Number of vertices, including halo vertices + integer, pointer :: nVertLevels ! Number of vertical layers + ! + integer, pointer :: nCellsSolve ! Number of cells, excluding halo cells + integer, pointer :: nEdgesSolve ! Number of edges, excluding halo edges + integer, pointer :: nVerticesSolve ! Number of vertices, excluding halo vertices + + ! MPAS vertical coordiante (invariant) + real(mpas_kind), pointer :: zint(:,:) ! Geometric height [m] at layer interfaces (nlev+1,ncol) + real(mpas_kind), pointer :: zz(:,:) ! Vertical coordinate metric [1] at layer + ! midpoints (nlev,ncol) + real(mpas_kind), pointer :: fzm(:) ! Interp weight from k layer midpoint to k + ! layer interface [1] (nlev) + real(mpas_kind), pointer :: fzp(:) ! Interp weight from k-1 layer midpoint to k + ! layer interface [dimensionless] (nlev) + ! 
Cell area (invariant) + real(mpas_kind), pointer :: areaCell(:) ! cell area [m^2] + + ! For edge-normal velocity calculations (invariant) + real(mpas_kind), pointer :: east(:,:) ! Cartesian components of unit east vector + ! at cell centers [dimensionless] (3,ncol) + real(mpas_kind), pointer :: north(:,:) ! Cartesian components of unit north vector + ! at cell centers [dimensionless] (3,ncol) + real(mpas_kind), pointer :: normal(:,:) ! Cartesian components of the vector normal + ! to an edge and tangential to the surface + ! of the sphere [dimensionless] (3,ncol) + integer, pointer :: cellsOnEdge(:,:) ! Indices of cells separated by an edge (2,nedge) + + ! Indices for tracer (scalar) indices + integer, pointer :: index_qv ! Tracer index for water-vapor mixing-ratio + + ! Base state variables + real(mpas_kind), pointer :: rho_base(:,:) ! Base-state dry air density [kg/m^3] (nlev,ncol) + real(mpas_kind), pointer :: theta_base(:,:) ! Base-state potential temperature [K] (nlev,ncol) + + ! State that is directly prognosed by the dycore + real(mpas_kind), pointer :: uperp(:,:) ! Normal velocity at edges [m/s] (nlev ,nedge) + real(mpas_kind), pointer :: w(:,:) ! Vertical velocity [m/s] (nlev+1,ncol) + real(mpas_kind), pointer :: theta_m(:,:) ! Moist potential temperature [K] (nlev ,ncol) + real(mpas_kind), pointer :: rho_zz(:,:) ! Dry density [kg/m^3] + ! divided by d(zeta)/dz (nlev ,ncol) + real(mpas_kind), pointer :: tracers(:,:,:) ! Tracers [kg/kg dry air] (nq,nlev ,ncol) + + ! State that may be directly derived from dycore prognostic state + real(mpas_kind), pointer :: theta(:,:) ! Potential temperature [K] (nlev,ncol) + real(mpas_kind), pointer :: exner(:,:) ! Exner function [-] (nlev,ncol) + real(mpas_kind), pointer :: rho(:,:) ! Dry density [kg/m^3] (nlev,ncol) + real(mpas_kind), pointer :: ux(:,:) ! Zonal veloc at center [m/s] (nlev,ncol) + real(mpas_kind), pointer :: uy(:,:) ! Meridional veloc at center [m/s] (nlev,ncol) + + ! 
Tendencies from physics + real(mpas_kind), pointer :: ru_tend(:,:) ! Normal horizontal momentum tendency + ! from physics [kg/m^2/s] (nlev,nedge) + real(mpas_kind), pointer :: rtheta_tend(:,:) ! Tendency of rho*theta/zz + ! from physics [kg K/m^3/s] (nlev,ncol) + real(mpas_kind), pointer :: rho_tend(:,:) ! Dry air density tendency + ! from physics [kg/m^3/s] (nlev,ncol) + + end type MPAS_statein_type + + !> ####################################################################################### + !> MPAS_stateout_type + !> + !> Fields prognosed (or diagnosed) by the MPAS dynamical core. + !> ####################################################################################### + type MPAS_stateout_type + ! Dimensions + integer, pointer :: nCells ! Number of cells, including halo cells + integer, pointer :: nEdges ! Number of edges, including halo edges + integer, pointer :: nVertices ! Number of vertices, including halo vertices + integer, pointer :: nVertLevels ! Number of vertical layers + ! + integer, pointer :: nCellsSolve ! Number of cells, excluding halo cells + integer, pointer :: nEdgesSolve ! Number of edges, excluding halo edges + integer, pointer :: nVerticesSolve ! Number of vertices, excluding halo vertices + + ! MPAS vertical coordiante (invariant) + real(mpas_kind), pointer :: zint(:,:) ! Geometric height [m] at layer interfaces (nlev+1,ncol) + real(mpas_kind), pointer :: zz(:,:) ! Vertical coordinate metric [1] at layer + ! midpoints (nlev,ncol) + real(mpas_kind), pointer :: fzm(:) ! Interp weight from k layer midpoint to k + ! layer interface [1] (nlev) + real(mpas_kind), pointer :: fzp(:) ! Interp weight from k-1 layer midpoint to k + ! layer interface [dimensionless] (nlev) + + ! Indices for tracer (scalar) indices + integer, pointer :: index_qv ! Tracer index for water-vapor mixing-ratio + + ! State that is directly prognosed by the dycore + real(mpas_kind), pointer :: uperp(:,:) ! 
Normal velocity at edges [m/s] (nlev ,nedge) + real(mpas_kind), pointer :: w(:,:) ! Vertical velocity [m/s] (nlev+1,ncol) + real(mpas_kind), pointer :: theta_m(:,:) ! Moist potential temperature [K] (nlev ,ncol) + real(mpas_kind), pointer :: rho_zz(:,:) ! Dry density [kg/m^3] + ! divided by d(zeta)/dz (nlev ,ncol) + real(mpas_kind), pointer :: tracers(:,:,:) ! Tracers [kg/kg dry air] (nq,nlev ,ncol) + + ! State that may be directly derived from dycore prognostic state. + real(mpas_kind), pointer :: theta(:,:) ! Potential temperature [K] (nlev,ncol) + real(mpas_kind), pointer :: exner(:,:) ! Exner function [-] (nlev,ncol) + real(mpas_kind), pointer :: rho(:,:) ! Dry density [kg/m^3] (nlev,ncol) + real(mpas_kind), pointer :: ux(:,:) ! Zonal veloc at center [m/s] (nlev,ncol) + real(mpas_kind), pointer :: uy(:,:) ! Meridional veloc at center [m/s] (nlev,ncol) + real(mpas_kind), pointer :: pmiddry(:,:) ! Dry hydrostatic pressure [Pa] + ! at layer midpoints (nlev,ncol) + real(mpas_kind), pointer :: pintdry(:,:) ! Dry hydrostatic pressure [Pa] + ! at layer interfaces (nlev+1,ncol) + real(mpas_kind), pointer :: pmid(:,:) ! Pressure at layer midpoints (nlev,ncol) + real(mpas_kind), pointer :: vorticity(:,:) ! Relative vertical vorticity [s^-1] + ! (nlev,nvtx) + real(mpas_kind), pointer :: divergence(:,:) ! Horizontal velocity divergence [s^-1] + ! (nlev,ncol) + end type MPAS_stateout_type + +contains + !> ######################################################################################### + !> Procedure to populate inputs to the CCPP physics using outputs the MPAS dynamical core. + !> + !> Use indicesGlobal to map from MPAS dycore deceomposition to CCPP Physics contiguous data + !> structures. 
+ !> + !> ######################################################################################### + subroutine ufs_mpas_to_physics(physics_state) + use GFS_typedefs, only : GFS_statein_type + use mpas_derived_types, only : mpas_pool_type + use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_array, mpas_pool_get_dimension + use atm_core, only : atm_compute_output_diagnostics + use mpas_kind_types, only : RKIND + ! Arguments + type(GFS_statein_type), intent(inout) :: physics_state + ! Locals + type(mpas_stateout_type) :: mpas_state + type(mpas_pool_type), pointer :: state_pool + type(mpas_pool_type), pointer :: diag_pool + type(mpas_pool_type), pointer :: mesh_pool + integer :: iCell, iCol, iTracer + integer, pointer :: nCellsSolve, num_scalars, nwat, index_qv, nVertLevels + real(RKIND), pointer :: surface_p(:) + + ! Access MPAS data pools. + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state_pool) + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'diag', diag_pool) + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'mesh', mesh_pool) + + ! Get MPAS dimensions + call mpas_pool_get_dimension(mesh_pool, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(state_pool, 'num_scalars', num_scalars) + call mpas_pool_get_dimension(state_pool, 'index_qv', index_qv) + call mpas_pool_get_dimension(state_pool, 'moist_end', nwat) + call mpas_pool_get_dimension(mesh_pool, 'nVertLevels', nVertLevels) + + ! 
Grab fields from MPAS pools + call mpas_pool_get_array(diag_pool, 'theta', MPAS_state % theta) + call mpas_pool_get_array(diag_pool, 'uReconstructZonal', MPAS_state % ux) + call mpas_pool_get_array(diag_pool, 'uReconstructMeridional', MPAS_state % uy) + call mpas_pool_get_array(state_pool, 'scalars', MPAS_state % tracers, timeLevel=1) + call mpas_pool_get_array(state_pool, 'w', MPAS_state % w, timeLevel=1) + call mpas_pool_get_array(diag_pool, 'exner', MPAS_state % exner) + call mpas_pool_get_array(mesh_pool, 'zgrid', MPAS_state % zint) + call mpas_pool_get_array(mesh_pool, 'zz', MPAS_state % zz) + call mpas_pool_get_array(state_pool, 'theta_m', MPAS_state % theta_m, timeLevel=1) + call mpas_pool_get_array(state_pool, 'rho_zz', MPAS_state % rho_zz, timeLevel=1) + + ! Copy fields from MPAS data containers to physics data containers. + ! [k, i] -> [i, k] + ! bottom-up -> top-down ordering convention + do iCell = 1, nCellsSolve + iCol = indicesGlobal(iCell) + physics_state % tgrs(iCol,:) = MPAS_state % theta(nVertLevels:1:-1,iCell) + physics_state % ugrs(iCol,:) = MPAS_state % ux(nVertLevels:1:-1,iCell) + physics_state % vgrs(iCol,:) = MPAS_state % uy(nVertLevels:1:-1,iCell) + physics_state % phil(iCol,:) = MPAS_state % zz(nVertLevels:1:-1,iCell) + physics_state % phii(iCol,:) = MPAS_state % zint(nVertLevels+1:1:-1,iCell) + physics_state % prslk(iCol,:) = MPAS_state % exner(nVertLevels:1:-1,iCell) + physics_state % vvl(iCol,:) = MPAS_state % w(nVertLevels:1:-1,iCell) + do iTracer = 1,num_scalars + physics_state % qgrs(iCol,:,iTracer) = MPAS_state % tracers(iTracer,nVertLevels:1:-1,iCell) + enddo + enddo + + ! 
Compute hydrostatic pressures + allocate(MPAS_state % pmid( nVertLevels, nCellsSolve)) + allocate(MPAS_state % pmiddry(nVertLevels, nCellsSolve)) + allocate(MPAS_state % pintdry(nVertLevels+1, nCellsSolve)) + call hydrostatic_pressure(nCellsSolve, nVertLevels, nwat, index_qv, MPAS_state % zz, & + MPAS_state % zint, MPAS_state % rho_zz, MPAS_state % theta_m, MPAS_state % exner, & + MPAS_state % tracers, MPAS_state % pmiddry, MPAS_state % pintdry, MPAS_state % pmid) + + ! Copy MPAS pressures into physics data containers. + ! [k, i] -> [i, k] + ! bottom-up -> top-down ordering convention + do iCell = 1, nCellsSolve + iCol = indicesGlobal(iCell) + physics_state % pgr(iCol) = MPAS_state % pintdry(1,iCell) + physics_state % prsl(iCol,:) = MPAS_state % pmiddry(nVertLevels:1:-1,iCell) + physics_state % prsi(iCol,:) = MPAS_state % pintdry(nVertLevels+1:1:-1,iCell) + enddo + end subroutine ufs_mpas_to_physics + + !> ######################################################################################### + !> Procedure to populate inputs to the MPAS dynamical core using outputs from the CCPP + !> physics. + !> + !> ######################################################################################### + subroutine ufs_physics_to_mpas(physics_state) + use GFS_typedefs, only : GFS_stateout_type + ! Arguments + type(GFS_stateout_type), intent(in ) :: physics_state + ! Locals + type(mpas_statein_type) :: mpas_state + + ! [i, k] -> [k, i] + ! top-down -> bottom-up ordering convention + ! Thermodynamic conversions from moist (CCPP) to dry (MPAS) + + end subroutine ufs_physics_to_mpas + + !> ######################################################################################### + !> Procedure to compute dry hydrostatic pressure at layer interfaces and midpoints. + !> + !> Given arrays of zz, zgrid, rho_zz, and theta_m from the MPAS-A prognostic state, compute + !> dry hydrostatic pressure at layer interfaces and midpoints. 
+ !> The vertical dimension for 3-d arrays is innermost, and k=1 represents the lowest layer + !> or level in the fields. + !> + !> \update: Dustin Swales April 2025 - Modified for use in UWM + !> + !> ######################################################################################### + subroutine hydrostatic_pressure(nCells, nVertLevels, qsize, index_qv, zz, zgrid, rho_zz, & + theta_m, exner, q, pmiddry, pintdry,pmid) + use mpas_constants, only: cp, rgas, cv, gravity, p0, Rv_over_Rd => rvord + use mpas_kind_types, only: RKIND + ! Arguments + integer, intent(in) :: nCells + integer, intent(in) :: nVertLevels + integer, intent(in) :: qsize + integer, intent(in) :: index_qv + real(RKIND), dimension(nVertLevels, nCells), intent(in) :: zz ! d(zeta)/dz [-] + real(RKIND), dimension(nVertLevels+1, nCells), intent(in) :: zgrid ! geometric heights of layer interfaces [m] + real(RKIND), dimension(nVertLevels, nCells), intent(in) :: rho_zz ! dry density / zz [kg m^-3] + real(RKIND), dimension(nVertLevels, nCells), intent(in) :: theta_m ! modified potential temperature + real(RKIND), dimension(nVertLevels, nCells), intent(in) :: exner ! Exner function + real(RKIND), dimension(qsize,nVertLevels, nCells), intent(in) :: q ! water vapor dry mixing ratio + real(RKIND), dimension(nVertLevels, nCells), intent(out):: pmiddry ! layer midpoint dry hydrostatic pressure [Pa] + real(RKIND), dimension(nVertLevels+1, nCells), intent(out):: pintdry ! layer interface dry hydrostatic pressure [Pa] + real(RKIND), dimension(nVertLevels, nCells), intent(out):: pmid ! layer midpoint hydrostatic pressure [Pa] + + ! Local variables + integer :: iCell, k, idx + real(RKIND), dimension(nVertLevels) :: dz ! Geometric layer thickness in column + real(RKIND), dimension(nVertLevels) :: dp,dpdry ! Pressure thickness + real(RKIND), dimension(nVertLevels+1,nCells) :: pint ! 
hydrostatic pressure at interface + real(RKIND) :: sum_water + real(RKIND) :: pk,rhok,rhodryk,thetavk,kap1,kap2,tvk,tk + real(RKIND), parameter :: epsilon = 0.05_RKIND + real(RKIND) :: dp_epsilon, dpdry_epsilon + + ! + ! For each column, integrate downward from model top to compute dry hydrostatic pressure at layer + ! midpoints and interfaces. The pressure averaged to layer midpoints should be consistent with + ! the ideal gas law using the rho_zz and theta values prognosed by MPAS at layer midpoints. + ! + do iCell = 1, nCells + dz(:) = zgrid(2:nVertLevels+1,iCell) - zgrid(1:nVertLevels,iCell) + do k = nVertLevels, 1, -1 + rhodryk = zz(k,iCell)* rho_zz(k,iCell) !full CAM physics density + rhok = 1.0_RKIND + do idx=2,qsize!dry_air_species_num+1,thermodynamic_active_species_num + rhok = rhok+q(idx,k,iCell) + end do + rhok = rhok*rhodryk + dp(k) = gravity*dz(k)*rhok + dpdry(k) = gravity*dz(k)*rhodryk + end do + + k = nVertLevels + sum_water = 1.0_RKIND + do idx=2,qsize!dry_air_species_num+1,thermodynamic_active_species_num + sum_water = sum_water+q(idx,k,iCell) + end do + rhok = sum_water*zz(k,iCell) * rho_zz(k,iCell) + thetavk = theta_m(k,iCell)/sum_water + tvk = thetavk*exner(k,iCell) + pk = dp(k)*rgas*tvk/(gravity*dz(k)) + ! + ! model top pressure consistently diagnosed using the assumption that the mid level + ! is at height z(nVertLevels-1)+0.5*dz + ! + pintdry(nVertLevels+1,iCell) = pk-0.5_RKIND*dz(nVertLevels)*rhok*gravity !hydrostatic + pint (nVertLevels+1,iCell) = pintdry(nVertLevels+1,iCell) + do k = nVertLevels, 1, -1 + ! + ! compute hydrostatic dry interface pressure so that (pintdry(k+1)-pintdry(k))/g is pseudo density + ! 
+ sum_water = 1.0_RKIND + do idx=2,qsize!dry_air_species_num+1,thermodynamic_active_species_num + sum_water = sum_water+q(idx,k,iCell) + end do + thetavk = theta_m(k,iCell)/sum_water!convert modified theta to virtual theta + tvk = thetavk*exner(k,iCell) + tk = tvk*sum_water/(1.0_RKIND+Rv_over_Rd*q(index_qv,k,iCell)) + pint (k,iCell) = pint (k+1,iCell)+dp(k) + pintdry(k,iCell) = pintdry(k+1,iCell)+dpdry(k) + pmid(k,iCell) = dp(k) *rgas*tvk/(gravity*dz(k)) + pmiddry(k,iCell) = dpdry(k)*rgas*tk /(gravity*dz(k)) + ! + ! PMID is not necessarily bounded by the hydrostatic interface pressure. + ! (has been found to be an issue at ~3.75km resolution in surface layer) + ! + dp_epsilon = dp(k) * epsilon + dpdry_epsilon = dpdry(k)*epsilon + pmid (k, iCell) = max(min(pmid (k, iCell), pint (k, iCell) - dp_epsilon), pint (k + 1, iCell) + dp_epsilon) + pmiddry(k, iCell) = max(min(pmiddry(k, iCell), pintdry(k, iCell) - dpdry_epsilon), pintdry(k + 1, iCell) + dpdry_epsilon) + end do + end do + end subroutine hydrostatic_pressure + + !> ######################################################################################### + !> Procedure to retreieve MPAS domain decomposition , for . + !> Called from atmos_model.F90:_init() + !> + !> ######################################################################################### + subroutine get_mpas_pio_decomp(varname) + use mpas_kind_types, only : StrKIND, RKIND + use mpas_pool_routines, only : mpas_pool_get_field_info, mpas_pool_get_field + use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_array + use mpas_pool_routines, only : mpas_pool_get_dimension + use mpas_derived_types, only : mpas_pool_field_info_type, field2DReal, field3DReal + use mpas_derived_types, only : mpas_pool_type + ! Arguments + character(len=*), intent(in) :: varname + ! 
Locals + character(len=*), parameter :: subname = 'ufs_mpas_subdriver::get_mpas_pio_decomp' + integer, dimension(:), pointer :: indexArray, indices + integer, pointer :: indexDimension + type (field2DReal), pointer :: field2d + type (field3DReal), pointer :: field3d + type (mpas_pool_field_info_type) :: fieldInfo + character (len=StrKIND) :: elementName, elementNamePlural + logical :: meshFieldDim, cellFieldDIm + integer :: i + + ! + call mpas_pool_get_field_info(domain_ptr % blocklist % allFields, trim(varname), fieldInfo) + if (trim(varname) == 'scalars') then + nullify(field3d) + if (fieldInfo % nTimeLevels > 1) then + call mpas_pool_get_field(domain_ptr % blocklist % allFields, trim(varname), field3d, & + timeLevel=fieldInfo % nTimeLevels ) + else + call mpas_pool_get_field(domain_ptr % blocklist % allFields, trim(varname), field3d) + endif + if ( field3d % isDecomposed ) then + meshFieldDim = .false. + cellFieldDIm = .false. + if (trim(field3d % dimNames(fieldInfo % nDims)) == 'nCells') then + elementName = 'Cell' + elementNamePlural = 'Cells' + meshFieldDim = .true. + cellFieldDIm = .true. + else if (trim(field3d % dimNames(fieldInfo % nDims)) == 'nEdges') then + elementName = 'Edge' + elementNamePlural = 'Edges' + meshFieldDim = .true. + else if (trim(field3d % dimNames(fieldInfo % nDims)) == 'nVertices') then + elementName = 'Vertex' + elementNamePlural = 'Vertices' + meshFieldDim = .true. + end if + endif + nullify(field3d) + else + nullify(field2d) + if (fieldInfo % nTimeLevels > 1) then + call mpas_pool_get_field(domain_ptr % blocklist % allFields, trim(varname), field2d, & + timeLevel=fieldInfo % nTimeLevels ) + else + call mpas_pool_get_field(domain_ptr % blocklist % allFields, trim(varname), field2d) + endif + if ( field2d % isDecomposed ) then + meshFieldDim = .false. + cellFieldDIm = .false. + if (trim(field2d % dimNames(fieldInfo % nDims)) == 'nCells') then + elementName = 'Cell' + elementNamePlural = 'Cells' + meshFieldDim = .true. 
+ cellFieldDIm = .true. + else if (trim(field2d % dimNames(fieldInfo % nDims)) == 'nEdges') then + elementName = 'Edge' + elementNamePlural = 'Edges' + meshFieldDim = .true. + else if (trim(field2d % dimNames(fieldInfo % nDims)) == 'nVertices') then + elementName = 'Vertex' + elementNamePlural = 'Vertices' + meshFieldDim = .true. + end if + endif + nullify(field2d) + endif + ! + if ( meshFieldDim ) then + allocate(indices(0)) + call mpas_pool_get_array(domain_ptr % blocklist % allFields, 'indexTo' // & + trim(elementName) // 'ID', indexArray) + call mpas_pool_get_dimension(domain_ptr % blocklist % dimensions, 'n' // & + trim(elementNamePlural) // 'Solve', indexDimension) + call mergeArrays(indices, indexArray(1:indexDimension)) + endif + ! Save indices for P2D coupling in run phase(s). + if ( cellFieldDIm ) then + allocate(indicesGlobal(indexDimension)) + indicesGlobal = indices + endif + + end subroutine get_mpas_pio_decomp + + subroutine mergeArrays(array1, array2) + implicit none + integer, dimension(:), pointer :: array1 + integer, dimension(:), intent(in) :: array2 + integer :: n1, n2 + integer, dimension(:), pointer :: newArray + + n1 = size(array1) + n2 = size(array2) + + allocate(newArray(n1+n2)) + + newArray(1:n1) = array1(:) + newArray(n1+1:n1+n2) = array2(:) + + deallocate(array1) + array1 => newArray + end subroutine mergeArrays + +end module atmos_coupling_mod diff --git a/mpas/atmos_model.F90 b/mpas/atmos_model.F90 new file mode 100644 index 0000000000..fff6197ec8 --- /dev/null +++ b/mpas/atmos_model.F90 @@ -0,0 +1,382 @@ +! ########################################################################################### +!> \file atmos_model.F90 +!> Driver for the UFS ATMospheric model with MPAS dynamical core and CCPP Physics. +!> Contains routines to advance the atmospheric model state by one forecast time step. +!> +! ########################################################################################### +module atmos_model_mod + ! 
Fortran + use mpi_f08, only : MPI_Comm, MPI_CHARACTER, MPI_INTEGER, MPI_REAL8, MPI_LOGICAL + ! MPAS + use MPAS_typedefs, only : MPAS_kind_phys => kind_phys + ! CCPP + use CCPP_data, only : UFSATM_control => GFS_control + use CCPP_data, only : UFSATM_intdiag => GFS_intdiag + use CCPP_data, only : UFSATM_interstitial => GFS_interstitial + use CCPP_data, only : UFSATM_grid => GFS_grid + use CCPP_data, only : UFSATM_tbd => GFS_tbd + use CCPP_data, only : UFSATM_sfcprop => GFS_sfcprop + use CCPP_data, only : UFSATM_statein => GFS_statein + use CCPP_data, only : UFSATM_stateout => GFS_stateout + use CCPP_data, only : UFSATM_cldprop => GFS_cldprop + use CCPP_data, only : UFSATM_radtend => GFS_radtend + use CCPP_data, only : UFSATM_coupling => GFS_coupling + use CCPP_data, only : ccpp_suite + use CCPP_driver, only : CCPP_step + ! FMS + use time_manager_mod, only : time_type, get_time, get_date, operator(+), operator(-) + use field_manager_mod, only : MODEL_ATMOS + use tracer_manager_mod, only : get_number_tracers, get_tracer_names, get_tracer_index + use fms_mod, only : check_nml_error + use fms2_io_mod, only : file_exists + use mpp_mod, only : input_nml_file, mpp_error, FATAL + use mpp_mod, only : mpp_pe, mpp_root_pe, mpp_clock_id, mpp_clock_begin + use mpp_mod, only : mpp_clock_end, CLOCK_COMPONENT, MPP_CLOCK_SYNC + use fms_mod, only : clock_flag_default + use fms_mod, only : stdlog + use mpp_mod, only : stdout + ! 
UFSATM + use module_mpas_config, only : pio_numiotasks, nCellsGlobal, ic_filename, lbc_filename + use module_mpas_config, only : lonCellGlobal, latCellGlobal, areaCellGlobal + use module_mpas_config, only : pi + use mod_ufsatm_util, only : get_atmos_tracer_types +#ifdef _OPENMP + use omp_lib +#endif + implicit none + + private + + public :: atmos_control_type + public :: atmos_model_init + public :: atmos_model_end + public :: atmos_model_radiation_physics + public :: atmos_model_microphysics + public :: atmos_model_dynamics + + !> ######################################################################################### + !> Type containing information on MPAS enabled UFSATM forecast. + !> + !> ######################################################################################### + type atmos_control_type + type(time_type) :: Time ! current time + type(time_type) :: Time_step ! atmospheric time step. + type(time_type) :: Time_init ! reference time. + integer :: nblks ! Number of physics blocks. + end type atmos_control_type + + ! Index map between MPAS tracers and CAM constituents + integer, dimension(:), pointer :: mpas_from_ufs_cnst => null() ! indices into UFS constituent array + ! Index map between MPAS tracers and UFS constituents + integer, dimension(:), pointer :: ufs_from_mpas_cnst => null() ! indices into MPAS tracers array + + ! Namelist + integer :: blocksize = 1 + logical :: dycore_only = .false. + logical :: debug = .false. + + namelist /atmos_model_nml/ blocksize, dycore_only, debug, ccpp_suite, ic_filename, lbc_filename + + ! Component Timers + integer :: setupClock, radClock, physClock, mpasClock, mpClock, atmiClock + + ! DJS2025: For UFS WM RTs unitl output is setup for MPAS. + integer, parameter :: mpas_logfile_handle = 42323 + +contains + !> ######################################################################################### + !> Procedure to initialize UWM ATMosphere with MPAS dynamical core. 
+ !> + !> - Read in ATMosphere namelist + !> - Initialize MPAS framework + !> - Read in MPAS namelist + !> - Initialize MPAS dynamical core + !> - Read in MPAS initial conditions + !> - Read in physics namelist + !> - Initialize CCPP framework + !> - Initialize CCPP Physics + !> + !> ######################################################################################### + subroutine atmos_model_init(Atmos, Time_init, Time, Time_end, Time_step, mpicomm, calendar) + use ufs_mpas_subdriver, only : MPAS_control_type + use ufs_mpas_subdriver, only : ufs_mpas_init_phase1, ufs_mpas_init_phase2 + use ufs_mpas_subdriver, only : ufs_mpas_open_init + use ufs_mpas_subdriver, only : dyn_mpas_read_write_stream, ufs_mpas_define_scalars + use ufs_mpas_subdriver, only : constituent_name, is_water_species + use atmos_coupling_mod, only : ufs_mpas_to_physics, get_mpas_pio_decomp + use MPAS_init, only : MPAS_initialize + + ! Arguments + type(atmos_control_type), intent(inout) :: Atmos + type(time_type), intent(in ) :: Time_init, Time, Time_step, Time_end + type(MPI_Comm), intent(in ) :: mpicomm + character(17), intent(in ) :: calendar + + ! Locals + integer :: i, io, ierr, nConstituents, sec, iCol + type(MPAS_control_type) :: Cfg + integer :: times(6), timee(6), ttime, logUnits(2), nthrds + + ! Set up timers + setupClock = mpp_clock_id( 'Time-Step Setup ', flags=clock_flag_default, grain=CLOCK_COMPONENT ) + atmiClock = mpp_clock_id( 'ATMosphere Setup ', flags=clock_flag_default, grain=CLOCK_COMPONENT ) + radClock = mpp_clock_id( 'Radiation ', flags=clock_flag_default, grain=CLOCK_COMPONENT ) + physClock = mpp_clock_id( 'Physics ', flags=clock_flag_default, grain=CLOCK_COMPONENT ) + mpasClock = mpp_clock_id( 'MPAS Dycore ', flags=clock_flag_default, grain=CLOCK_COMPONENT ) + mpClock = mpp_clock_id( 'Microphysics ', flags=clock_flag_default, grain=CLOCK_COMPONENT ) + + ! Start timer for this procedure (init). + call mpp_clock_begin(atmiClock) + + ! 
Set model time + Atmos % Time_init = Time_init + Atmos % Time = Time + Atmos % Time_step = Time_step + call get_time (Atmos % Time_step, sec) + Cfg%dt_phys = real(sec) + + ! Get forecast start/stop times (year/month/day/hour/minute/second) + call get_date(Time_init,times(1),times(2),times(3),times(4),times(5),times(6)) + call get_date(Time_end, timee(1),timee(2),timee(3),timee(4),timee(5),timee(6)) + call get_time(Time_end - Time_init, ttime) + + ! Set MPI bookeeping parameters. + Cfg%me = mpp_pe() + Cfg%master = mpp_root_pe() + Cfg%mpi_comm = mpicomm + + ! Read in ATMosphere namelist. + if (file_exists('input.nml')) then + read(input_nml_file, nml=atmos_model_nml, iostat=io) + ierr = check_nml_error(io, 'atmos_model_nml') + endif + + ! Get tracer name(s) and type(s). + call get_number_tracers(MODEL_ATMOS, num_tracers=Cfg % nConstituents) + allocate (Cfg % tracer_names(Cfg % nConstituents), Cfg % tracer_types(Cfg % nConstituents)) + do i = 1, Cfg % nConstituents + call get_tracer_names(MODEL_ATMOS, i, Cfg % tracer_names(i)) + enddo + call get_atmos_tracer_types(Cfg % tracer_types) + + ! DJS2025: There are 9 tracers, but only 6 are water. How do we get to 6? + ! With FV3, this is set during dycore initialization. Set and Revisit later. + Cfg % nwat = 6 + + call get_number_tracers(MODEL_ATMOS, num_tracers=Cfg % nConstituents) + allocate (constituent_name(Cfg % nConstituents), is_water_species(Cfg % nConstituents)) + do i = 1, Cfg % nConstituents + call get_tracer_names(MODEL_ATMOS, i, constituent_name(i)) + enddo + is_water_species(:) = .false. + is_water_species(1:Cfg % nwat) = .true. + + ! Open (PIO) MPAS IC data file. + call ufs_mpas_open_init() + + ! Call MPAS initialization phase 1. + ! - Set up MPAS framework + ! - Read in MPAS namelists + ! - Set up MPAS logging + ! - Read in static data, setup MPAS invariant stream + ! - Setup physical constants used by MPAS dycore + logUnits(1) = stdout() + logUnits(2) = stdlog() + + ! 
DJS2025: This is for UWM RT logging only. Can be removed when MPAS output is added. + if (Cfg % master == Cfg % me) then + open(unit=mpas_logfile_handle, file='mpas_log.txt', action='write', status='unknown') + logunits(1) = mpas_logfile_handle + logunits(2) = mpas_logfile_handle + endif + + call ufs_mpas_init_phase1(Cfg, times, timee, ttime, calendar, logUnits) + + call ufs_mpas_define_scalars(mpas_from_ufs_cnst, ufs_from_mpas_cnst, ierr) + if (ierr /= 0) then + call mpp_error(FATAL,'ERROR: Set-up of constituents for MPAS-A dycore failed.') + end if + + ! Read in MPAS IC data. Populate MPAS data containers and MPAS "input" stream. + call dyn_mpas_read_write_stream( 'r', 'input-scalars') + + ! Complete the MPAS dycore initialization. + ! - Set up threading. + ! - Call MPAS core_atmosphere init. + call ufs_mpas_init_phase2(Cfg) + + !> ######################################################################################### + !> ######################################################################################### + !> END MPAS DYCORE INITIALIZATION + !> ######################################################################################### + !> ######################################################################################### + + ! Set domain decomposition needed for P2D step + ! Use 'theta', but any MPAS field defined on the cell center will work. + call get_mpas_pio_decomp('theta') + + !> ######################################################################################### + !> ######################################################################################### + !> BEGIN CCPP PHYSICS INITIALIZATION + !> ######################################################################################### + !> ######################################################################################### +#ifdef _OPENMP + nthrds = omp_get_max_threads() +#else + nthrds = 1 +#endif + ! Set file ID for log file + Cfg%nlunit = stdlog() + + ! 
Number of physics blocks + Atmos % nblks = nCellsGlobal / blocksize + if (mod(nCellsGlobal, blocksize) .gt. 0) Atmos % nblks = Atmos % nblks + 1 + + ! Physics block sizes. + Cfg % nblks = Atmos % nblks + allocate(Cfg % blksz(Atmos % nblks)) + Cfg % blksz(:) = blocksize + Cfg % blksz(Atmos % nblks) = nCellsGlobal - (Atmos % nblks - 1)*blocksize + + allocate(UFSATM_interstitial(nthrds+1)) + + ! Update time (UFS specific time formatting array) + Cfg%bdat(:) = 0 + call get_date (Time_init, Cfg%bdat(1), Cfg%bdat(2), Cfg%bdat(3), Cfg%bdat(5), Cfg%bdat(6), Cfg%bdat(7)) + Cfg%cdat(:) = 0 + call get_date (Time, Cfg%cdat(1), Cfg%cdat(2), Cfg%cdat(3), Cfg%cdat(5), Cfg%cdat(6), Cfg%cdat(7)) + + ! Allocate required to work around GNU compiler bug 100886 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100886 + allocate(Cfg%input_nml_file, mold=input_nml_file) + Cfg%input_nml_file => input_nml_file + Cfg%fn_nml='using internal file' + + ! Read in physics namelist and allocate data containers. + call MPAS_initialize(UFSATM_control, UFSATM_intdiag, UFSATM_grid, UFSATM_tbd, UFSATM_sfcprop, & + UFSATM_statein, UFSATM_cldprop, UFSATM_radtend, UFSATM_coupling, Cfg) + + ! Get longitude/latitude/area from MPAS to use in the physics. + UFSATM_grid % xlon = lonCellGlobal + UFSATM_grid % xlat = latCellGlobal + UFSATM_grid % xlon_d = lonCellGlobal*180./pi + UFSATM_grid % xlat_d = latCellGlobal*180./pi + UFSATM_grid % area = areaCellGlobal + + ! Populate UFSATM data containers with MPAS "input" stream. We need to do this becuase + ! we are calling the physics before the dynamical core. + call ufs_mpas_to_physics(UFSATM_statein) + + ! Initialize the CCPP framework + call CCPP_step (step="init", nblks=Atmos % nblks, ierr=ierr, dycore='mpas') + if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP init step failed') + + ! 
Initialize the CCPP physics + call CCPP_step (step="physics_init", nblks=Atmos % nblks, ierr=ierr, dycore='mpas') + if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP physics_init step failed') + + ! Initialize stochastic physics pattern generation / cellular automata + ! NOT YET IMPLEMENTED + + ! Initialize three-dimensional physics. + ! NOT YET IMPLEMENTED + + call mpp_clock_end(atmiClock) + ! + end subroutine atmos_model_init + + !> ######################################################################################### + !> Procedure to finalize model. + !> + !> ######################################################################################### + subroutine atmos_model_end(Atmos) + type (atmos_control_type), intent(inout) :: Atmos + ! Locals + integer :: ierr + + close(unit=mpas_logfile_handle) + + ! Finalize the CCPP physics. + call CCPP_step (step="finalize", nblks=Atmos % nblks, ierr=ierr, dycore='mpas') + if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP finalize step failed') + + end subroutine atmos_model_end + + !> ######################################################################################### + !> Procedure to call atmospheric radiation and physics groups (CCPP). + !> + !> ######################################################################################### + subroutine atmos_model_radiation_physics(Atmos) + type (atmos_control_type), intent(inout) :: Atmos + ! Locals + integer :: ierr + + ! Call CCPP Timestep_initialize Group + call mpp_clock_begin(setupClock) + call CCPP_step (step="timestep_init", nblks=Atmos % nblks, ierr=ierr, dycore='mpas') + if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP timestep_init step failed') + call mpp_clock_end(setupClock) + + ! Call CCPP Radiation Group + call mpp_clock_begin(radClock) + if (UFSATM_control%lsswr .or. 
UFSATM_control%lslwr) then + !call CCPP_step (step="radiation", nblks=Atmos % nblks, ierr=ierr, dycore='mpas') + if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP radiation step failed') + endif + call mpp_clock_end(radClock) + + ! Call CCPP Physics Group + call mpp_clock_begin(physClock) + call CCPP_step (step="physics", nblks=Atmos % nblks, ierr=ierr, dycore='mpas') + if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP physics step failed') + call mpp_clock_end(physClock) + + end subroutine atmos_model_radiation_physics + + !> ######################################################################################### + !> Procedure to call atmospheric dynamics (MPAS). + !> + !> ######################################################################################### + subroutine atmos_model_dynamics(Atmos) + use ufs_mpas_subdriver, only : ufs_mpas_run + use atmos_coupling_mod, only : ufs_physics_to_mpas, ufs_mpas_to_physics + use MPAS_init, only : MPAS_initialize + + type (atmos_control_type), intent(inout) :: Atmos + + ! Prepare MPAS dycore inputs with CCPP physics outputs. + call ufs_physics_to_mpas(UFSATM_stateout) + + ! Call MPAS dycore + call mpp_clock_begin(mpasClock) + call ufs_mpas_run() + call mpp_clock_end(mpasClock) + + ! Prepare CCPP physics inputs with MPAS dycore outputs. + call ufs_mpas_to_physics(UFSATM_statein) + + end subroutine atmos_model_dynamics + + !> ######################################################################################### + !> Procedure to call microphysics group (CCPP). + !> + !> ######################################################################################### + subroutine atmos_model_microphysics(Atmos) + type (atmos_control_type), intent(inout) :: Atmos + ! Locals + integer :: ierr + + ! 
Call CCPP Microphysics Group + call mpp_clock_begin(mpClock) + call CCPP_step (step="microphysics", nblks=Atmos % nblks, ierr=ierr, dycore='mpas') + if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP microphysics step failed') + call mpp_clock_end(mpClock) + + ! Call CCPP Timestep_finalize Group + call mpp_clock_begin(setupClock) + call CCPP_step (step="timestep_finalize", nblks=Atmos % nblks, ierr=ierr, dycore='mpas') + if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP timestep_finalize step failed') + call mpp_clock_end(setupClock) + + end subroutine atmos_model_microphysics + +end module atmos_model_mod diff --git a/mpas/module_fcst_grid_comp.F90 b/mpas/module_fcst_grid_comp.F90 new file mode 100644 index 0000000000..721ea8acc9 --- /dev/null +++ b/mpas/module_fcst_grid_comp.F90 @@ -0,0 +1,338 @@ +#define ESMF_ERR_ABORT(rc) \ +if (rc /= ESMF_SUCCESS) write(0,*) 'rc=',rc,__FILE__,__LINE__; if(ESMF_LogFoundError(rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT) +! ########################################################################################### +!> \file module_fcst_grid_comp.F90 +!> +!> ESMF forecast gridded component for MPAS ATMosphere. +!> +! 
########################################################################################### +module module_fcst_grid_comp + use mpi_f08 + use esmf + use nuopc + use time_manager_mod, only: time_type, set_calendar_type, set_time, set_date, & + month_name, operator(+), operator(-), operator (<), & + operator (>), operator (/=), operator (/), operator (==), & + operator (*), THIRTY_DAY_MONTHS, JULIAN, GREGORIAN, NOLEAP, & + NO_CALENDAR, date_to_string, get_date, get_time + use atmos_model_mod, only: atmos_model_init, atmos_model_end, atmos_control_type + use atmos_model_mod, only: atmos_model_radiation_physics, atmos_model_dynamics, & + atmos_model_microphysics + use constants_mod, only: constants_init + use fms_mod, only: error_mesg, fms_init, fms_end, write_version_number, & + uppercase + use mpp_mod, only: mpp_init, mpp_pe, mpp_npes, mpp_root_pe, & + mpp_set_current_pelist, mpp_error, FATAL, WARNING, NOTE + use mpp_mod, only: mpp_clock_id, mpp_clock_begin + use sat_vapor_pres_mod, only: sat_vapor_pres_init + use diag_manager_mod, only: diag_manager_init, diag_manager_end, & + diag_manager_set_time_end + use module_mpas_config, only: dt_atmos, fcst_mpi_comm, fcst_ntasks, calendar + + implicit none + private + + !---- model defined-types ---- + type(atmos_control_type), save :: Atmos + integer :: n_atmsteps + + !----- coupled model data ----- + integer :: calendar_type = -99 + integer :: date_init(6) + + integer :: mype = 0 + + public SetServices + +contains + + ! ######################################################################################### + ! ESMF entrypoints for forecast grid-component. + ! ######################################################################################### + subroutine SetServices(fcst_comp, rc) + type(ESMF_GridComp) :: fcst_comp + integer, intent(out) :: rc + + rc = ESMF_SUCCESS + + ! 
Initialize + call ESMF_GridCompSetEntryPoint(fcst_comp, ESMF_METHOD_INITIALIZE, & + userRoutine=fcst_initialize, phase=1, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! Advertise + call ESMF_GridCompSetEntryPoint(fcst_comp, ESMF_METHOD_INITIALIZE, & + userRoutine=fcst_advertise, phase=2, rc=rc) + + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! Realize + call ESMF_GridCompSetEntryPoint(fcst_comp, ESMF_METHOD_INITIALIZE, & + userRoutine=fcst_realize, phase=3, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! Run Phase 1 + call ESMF_GridCompSetEntryPoint(fcst_comp, ESMF_METHOD_RUN, & + userRoutine=fcst_run_phase_1, phase=1, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! Finalize + call ESMF_GridCompSetEntryPoint(fcst_comp, ESMF_METHOD_FINALIZE, & + userRoutine=fcst_finalize, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + end subroutine SetServices + + ! ######################################################################################### + ! Initialize the ESMF forecast grid component. + ! ######################################################################################### + subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc) + type(esmf_GridComp) :: fcst_comp + type(ESMF_State) :: importState, exportState + type(esmf_Clock) :: clock + integer,intent(out) :: rc + + ! 
Locals + integer :: i, j, k, n + type(ESMF_VM) :: VM + type(ESMF_Time) :: CurrTime, StartTime, StopTime + type(ESMF_Config) :: cf + real(kind=8) :: tbeg1 + logical :: fexist + integer :: initClock, io_unit, calendar_type_res, date_res(6), date_init_res(6) + integer,dimension(6) :: date, date_end, days + type(time_type) :: Time_init, Time, Time_step, Time_end, Time_restart, Time_step_restart + + ! Initialize ESMF error message. + rc = ESMF_SUCCESS + + ! Timing info (debug mode) + tbeg1 = mpi_wtime() + + call ESMF_VMGetCurrent(vm=vm,rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_VMGet(vm=vm, localPet=mype, mpiCommunicator=fcst_mpi_comm%mpi_val, & + petCount=fcst_ntasks, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (mype == 0) write(*,*)'in fcst_initialize, fcst_ntasks=',fcst_ntasks + + CF = ESMF_ConfigCreate(rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! Load resoure file. + call ESMF_ConfigLoadFile(config=CF ,filename='model_configure' ,rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call fms_init(fcst_mpi_comm%mpi_val) + call mpp_init() + initClock = mpp_clock_id( 'Initialization' ) + call mpp_clock_begin (initClock) !nesting problem + + call constants_init + call sat_vapor_pres_init + + select case( uppercase(trim(calendar)) ) + case( 'JULIAN' ) + calendar_type = JULIAN + case( 'GREGORIAN' ) + calendar_type = GREGORIAN + case( 'NOLEAP' ) + calendar_type = NOLEAP + case( 'THIRTY_DAY' ) + calendar_type = THIRTY_DAY_MONTHS + case( 'NO_CALENDAR' ) + calendar_type = NO_CALENDAR + case default + call mpp_error ( FATAL, 'fcst_initialize: calendar must be one of '// & + 'JULIAN|GREGORIAN|NOLEAP|THIRTY_DAY|NO_CALENDAR.' ) + end select + + call set_calendar_type (calendar_type) + + ! + ! 
Set atmos time. + ! + call ESMF_ClockGet(clock, CurrTime=CurrTime, StartTime=StartTime, & + StopTime=StopTime, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + date_init = 0 + call ESMF_TimeGet (StartTime, & + YY=date_init(1), MM=date_init(2), DD=date_init(3), & + H=date_init(4), M =date_init(5), S =date_init(6), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + Time_init = set_date (date_init(1), date_init(2), date_init(3), & + date_init(4), date_init(5), date_init(6)) + if (mype == 0) write(*,'(A,6I5)') 'in fcst_initialize, StartTime=',date_init + + date=0 + call ESMF_TimeGet (CurrTime, & + YY=date(1), MM=date(2), DD=date(3), & + H=date(4), M =date(5), S =date(6), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + Time = set_date (date(1), date(2), date(3), & + date(4), date(5), date(6)) + if (mype == 0) write(*,'(A,6I5)') 'in fcst_initialize, CurrTime =',date + + date_end=0 + call ESMF_TimeGet (StopTime, & + YY=date_end(1), MM=date_end(2), DD=date_end(3), & + H=date_end(4), M =date_end(5), S =date_end(6), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + Time_end = set_date (date_end(1), date_end(2), date_end(3), & + date_end(4), date_end(5), date_end(6)) + if (mype == 0) write(*,'(A,6I5)') 'in fcst_initialize, StopTime =',date_end + + ! + ! If this is a restarted run ('INPUT/coupler.res' file exists, compare date and date_init + ! to the values in 'coupler.res'. + ! + if (mype == 0) then + inquire(FILE='INPUT/coupler.res', EXIST=fexist) + if (fexist) then ! 
file exists, this is a restart run + + open(newunit=io_unit, file='INPUT/coupler.res', status='old', action='read', err=998) + read (io_unit,*,err=999) calendar_type_res + read (io_unit,*) date_init_res + read (io_unit,*) date_res + close(io_unit) + + if(date_res(1) == 0 .and. date_init_res(1) /= 0) date_res = date_init_res + + if(mype == 0) write(*,'(A,6(I4))') 'in fcst_initialize, INPUT/coupler.res: date_init=',date_init_res + if(mype == 0) write(*,'(A,6(I4))') 'in fcst_initialize, INPUT/coupler.res: date =',date_res + + if (calendar_type /= calendar_type_res) then + write(0,'(A)') 'fcst_initialize ERROR: calendar_type /= calendar_type_res' + write(0,'(A,6(I4))')' calendar_type = ', calendar_type + write(0,'(A,6(I4))')' calendar_type_res = ', calendar_type_res + call ESMF_Finalize(endflag=ESMF_END_ABORT) + endif + + if (.not. ALL(date_init.EQ.date_init_res)) then + write(0,'(A)') 'fcst_initialize ERROR: date_init /= date_init_res' + write(0,'(A,6(I4))')' date_init = ', date_init + write(0,'(A,6(I4))')' date_init_res = ', date_init_res + call ESMF_Finalize(endflag=ESMF_END_ABORT) + endif + + if (.not. ALL(date.EQ.date_res)) then + write(0,'(A)') 'fcst_initialize ERROR: date /= date_res' + write(0,'(A,6(I4))')' date = ', date + write(0,'(A,6(I4))')' date_res = ', date_res + call ESMF_Finalize(endflag=ESMF_END_ABORT) + endif + +999 continue +998 continue + + endif ! fexist + endif ! mype == 0 + + call diag_manager_init (TIME_INIT=date) + call diag_manager_set_time_end(Time_end) + + Time_step = set_time (dt_atmos,0) + if (mype == 0) write(*,*)'fcst_initialize, time_init=', date_init,'time=',date,'time_end=',date_end,'dt_atmos=',dt_atmos + + ! ####################################################################################### + ! Initialize component models. + ! atmos_model_init() calls the MPAS dycore initialization. + ! 
####################################################################################### + call atmos_model_init(Atmos, Time_init, Time, Time_end, Time_step, fcst_mpi_comm, calendar) + + ! Timing info (debug mode) + if (mype == 0) write(*,*)'PASS(fcst_initialize): Time is ', mpi_wtime() - tbeg1 + + end subroutine fcst_initialize + + ! ########################################################################################### + ! Advertise the ESMF forecast grid component. + ! ########################################################################################### + subroutine fcst_advertise(fcst_comp, importState, exportState, clock, rc) + type(esmf_GridComp) :: fcst_comp + type(ESMF_State) :: importState, exportState + type(esmf_Clock) :: clock + integer,intent(out) :: rc + + ! Initialize ESMF error message. + rc = ESMF_SUCCESS + + end subroutine fcst_advertise + + ! ########################################################################################### + ! Realize the ESMF forecast grid component. + ! ########################################################################################### + subroutine fcst_realize(fcst_comp, importState, exportState, clock, rc) + type(esmf_GridComp) :: fcst_comp + type(ESMF_State) :: importState, exportState + type(esmf_Clock) :: clock + integer,intent(out) :: rc + + ! Initialize ESMF error message. + rc = ESMF_SUCCESS + + end subroutine fcst_realize + + ! ########################################################################################### + ! Run phase(1) for the ESMF forecast grid component. + ! ########################################################################################### + subroutine fcst_run_phase_1(fcst_comp, importState, exportState, clock, rc) + type(ESMF_GridComp) :: fcst_comp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer,intent(out) :: rc + + ! Locals + integer :: seconds + real(kind=8) :: mpi_wtime, tbeg1 + + ! Initialize ESMF error message. 
+ rc = ESMF_SUCCESS + + ! Timing info (debug mode) + tbeg1 = mpi_wtime() + call get_time(Atmos%Time - Atmos%Time_init, seconds) + n_atmsteps = seconds/dt_atmos + + ! Call forecast integration subroutines... + call atmos_model_radiation_physics (Atmos) + call atmos_model_dynamics (Atmos) + call atmos_model_microphysics (Atmos) + + ! Timing info (debug mode) + if (mype == 0) write(*,'(A,I16,A,F16.6)')'PASS(fcstRUN phase 1), n_atmsteps = ', & + n_atmsteps,' time is ',mpi_wtime()-tbeg1 + end subroutine fcst_run_phase_1 + + ! ########################################################################################### + ! Finalize the ESMF forecast grid component. + ! ########################################################################################### + subroutine fcst_finalize(fcst_comp, importState, exportState, clock, rc) + type(esmf_GridComp) :: fcst_comp + type(ESMF_State) :: importState, exportState + type(esmf_Clock) :: clock + integer,intent(out) :: rc + + ! Locals + real(kind=8) :: mpi_wtime, tbeg1 + + ! Initialize ESMF error message. + rc = ESMF_SUCCESS + + ! Timing info (debug mode) + tbeg1 = mpi_wtime() + + call atmos_model_end (Atmos) + call diag_manager_end (Atmos%Time) + call fms_end + + ! Timing info (debug mode) + if (mype == 0) write(*,*)'PASS(fcst_finalize): total is ', mpi_wtime() - tbeg1 + + end subroutine fcst_finalize +end module module_fcst_grid_comp diff --git a/mpas/module_mpas_config.F90 b/mpas/module_mpas_config.F90 new file mode 100644 index 0000000000..2ab507bab2 --- /dev/null +++ b/mpas/module_mpas_config.F90 @@ -0,0 +1,95 @@ +! ######################################################################################### +! +! MPAS configuration information +! +! 
######################################################################################### +module module_mpas_config + use MPAS_typedefs, only: r8 => kind_dbl_prec, r4 => kind_sngl_prec + use GFS_typedefs, only: pi => con_pi + use mpi_f08 + use pio, only : iosystem_desc_t, file_desc_t, io_desc_t + use esmf + + implicit none + + !> Atmosphere time step in seconds + integer :: dt_atmos + + !> Number of MPAS dycore calls per ATMosphere time step. + integer :: n_atmos + + !> MPI communicator for the forecast grid component + type(MPI_Comm) :: fcst_mpi_comm + + !> Total number of mpi tasks for the forecast grid components + integer :: fcst_ntasks + + !> The first integration step + integer :: first_kdt + + !> ID number for the coupled grids + integer :: cpl_grid_id + + !> Flag to decide if model writes out coupled diagnostic fields + logical :: cplprint_flag = .false. + + !> Flag to decide if write grid components is used + logical :: quilting = .false. + + !> Flag to decide if write grid component writes out restart files + logical :: quilting_restart = .false. + + !> Output frequency if this array has only two elements and the value of + !! the second element is -1. Otherwise, it is the specific output forecast + !! 
hours + real,dimension(:),allocatable :: output_fh + + !> Calendar type + character(17) :: calendar=' ' + + !> MPAS Initial Condition file (via UFSATM NML) + character(len=256) :: ic_filename + + !> MPAS Lateral Boundary Condition file (via UFSATM NML) + character(len=256) :: lbc_filename + + !> PIO + type(iosystem_desc_t), pointer :: pio_subsystem + integer :: pio_iotype + integer :: pio_ioformat + integer :: pio_stride + integer :: pio_numiotasks + type(file_desc_t), target :: pioid + type(io_desc_t) :: pio_iodesc + + !> MPAS Grid information + real(r8), target, allocatable :: zref(:) + real(r8), target, allocatable :: zref_edge(:) + real(r8), target, allocatable :: pref(:) + real(r8), target, allocatable :: pref_edge(:) + + !> sphere_radius is a global attribute in the MPAS initial file. It is needed to + !> normalize the cell areas to a unit sphere. + real(r8) :: sphere_radius + + integer :: maxNCells ! maximum number of cells for any task (nCellsSolve <= maxNCells) + integer :: maxEdges ! maximum number of edges per cell + integer :: nVertLevels ! number of vertical layers (midpoints) + + integer, pointer :: & + nCellsSolve, & ! number of cells that a task solves + nEdgesSolve, & ! number of edges (velocity) that a task solves + nVerticesSolve, & ! number of vertices (vorticity) that a task solves + nVertLevelsSolve + + !> Global gridded data + integer :: nCellsGlobal ! global number of cells/columns + integer :: nEdgesGlobal ! global number of edges + integer :: nVerticesGlobal ! 
global number of vertices + + !> GridCell Longitude/Latitude/Area + real(r4), allocatable :: latCellGlobal(:) + real(r4), allocatable :: lonCellGlobal(:) + real(r4), allocatable :: areaCellGlobal(:) + +end module module_mpas_config diff --git a/mpas/ufs_mpas_subdriver.F90 b/mpas/ufs_mpas_subdriver.F90 new file mode 100644 index 0000000000..889caf6d71 --- /dev/null +++ b/mpas/ufs_mpas_subdriver.F90 @@ -0,0 +1,3060 @@ +!> ########################################################################################### +!> \file ufs_mpas_subdriver.F90 +!> UFSATM subdriver for MPAS dynamical core. +!> +!> Routines from the subdrivers for MPAS-A and CAM-SIMA have been adopted/modified here for use +!> within the UFS Weather Model. +!> MPAS-A Subdriver: MPAS-Model/src/driver/mpas_subdriver.F +!> CAM-SIMA (external): src/dynamics/mpas/driver/dyn_mpas_subdriver.F90 +!> (https://github.com/ESCOMP/CAM-SIMA/blob/development/) +!> +!> Overview: +!> Initialization is broken down into two phases, with ufs_mpas_define_scalars() called in +!> between: +!> ufs_mpas_init_phase1: Initialize MPAS framework, Read in namelist, Read static data. +!> ufs_mpas_define_scalars: Set up scalars/tracers/constituents/... +!> ufs_mpas_init_phase2: Complete MPAS initialization +!> +!> Forward integration of the dycore is handled in ufs_mpas_run. The current forecast time, +!> forecast interval, and MPAS dycore time step are used to integrate the model forward in +!> time. Afterwards, atm_compute_output_diagnostics() is called to compute fields needed by +!> the Physics. +!> +!> Other public routines used by the UFSATM driver +!> ufs_mpas_open_init: Open MPAS Initial Condition file, return PIO file handle. 
+!> +!> ########################################################################################### +module ufs_mpas_subdriver + use mpi_f08 + use mpas_derived_types, only : core_type, domain_type, mpas_Clock_type + use mpas_kind_types, only : StrKIND, rkind + use module_mpas_config, only : pio_subsystem, pio_stride, pio_numiotasks, pio_iodesc + use module_mpas_config, only : ic_filename, lbc_filename + use module_mpas_config, only : pio_iotype, fcst_mpi_comm, pioid + use module_mpas_config, only : zref, zref_edge, sphere_radius, pref, pref_edge + use module_mpas_config, only : maxNCells, maxEdges, nVertLevels + use module_mpas_config, only : nCellsGlobal, nEdgesGlobal, nVerticesGlobal + use module_mpas_config, only : nCellsSolve, nEdgesSolve, nVerticesSolve, nVertLevelsSolve + use module_mpas_config, only : dt_atmos, n_atmos + use module_mpas_config, only : latCellGlobal, lonCellGlobal, areaCellGlobal + implicit none + + private + + public :: MPAS_control_type + public :: ufs_mpas_init_phase1 + public :: ufs_mpas_define_scalars + public :: ufs_mpas_init_phase2 + public :: ufs_mpas_run + public :: ufs_mpas_open_init + public :: corelist, domain_ptr + public :: constituent_name + public :: is_water_species + public :: dyn_mpas_read_write_stream + + !> ######################################################################################### + !> + !> ######################################################################################### + type MPAS_control_type + + ! Namelist filename + character(len=64) :: fn_nml + + ! Full namelist for use with internal file reads + character(len=:), pointer, dimension(:) :: input_nml_file => null() + + ! MPI Bookkeeping + integer :: me !< current MPI-rank + integer :: master !< master MPI-rank + type(MPI_Comm) :: mpi_comm !< forecast tasks mpi communicator + + ! ESMF + integer :: fcst_ntasks !< total number of forecast tasks + + ! 
Log file identifier + integer :: nlunit !< fortran unit number for file opens + integer :: logunit !< fortran unit number for writing logfile + + ! UFS date(s) for model time. + integer :: bdat(8) !< model begin date in GFS format (same as idat) + integer :: cdat(8) !< model current date in GFS format (same as jdat) + + ! Spatial/Temporal parameters for physics/dynamics coupling. + real(rkind) :: dt_dycore !< dynamics time step in seconds + real(rkind) :: dt_phys !< physics time step in seconds + integer :: nblks !< Number of data (physics) blocks. + integer, pointer :: blksz(:) !< Block size for data blocking (default blksz(1)=[nCells]) + integer :: levs !< number of vertical levels + + ! + integer :: iau_offset !< iau running window length + logical :: restart !< flag whether this is a coldstart (.false.) or a warmstart/restart (.true.) + + ! Tracers + integer :: nConstituents !< Number of constituents (tracers). + integer :: nwat !< number of hydrometeors in dcyore (including water vapor) + character(len=32), pointer :: tracer_names(:) !< tracers names to dereference tracer id + integer, pointer :: tracer_types(:) !< tracers types: 0=generic, 1=chem,prog, 2=chem,diag + + end type MPAS_control_type + + !> ######################################################################################### + ! + !> ######################################################################################### + type :: var_info_type + private + character(64) :: name = '' + character(10) :: type = '' + integer :: rank = 0 + end type var_info_type + + !> ######################################################################################### + !> This list corresponds to the "invariant" stream in MPAS registry. + !> It consists of variables that are members of the "mesh" struct. 
+ !> ######################################################################################### + type(var_info_type), parameter :: invariant_var_info_list(*) = [ & + var_info_type('angleEdge' , 'real' , 1), & + var_info_type('areaCell' , 'real' , 1), & + var_info_type('areaTriangle' , 'real' , 1), & + var_info_type('bdyMaskCell' , 'integer' , 1), & + var_info_type('bdyMaskEdge' , 'integer' , 1), & + var_info_type('bdyMaskVertex' , 'integer' , 1), & + var_info_type('cellTangentPlane' , 'real' , 3), & + var_info_type('cell_gradient_coef_x' , 'real' , 2), & + var_info_type('cell_gradient_coef_y' , 'real' , 2), & + var_info_type('cellsOnCell' , 'integer' , 2), & + var_info_type('cellsOnEdge' , 'integer' , 2), & + var_info_type('cellsOnVertex' , 'integer' , 2), & + var_info_type('cf1' , 'real' , 0), & + var_info_type('cf2' , 'real' , 0), & + var_info_type('cf3' , 'real' , 0), & + var_info_type('coeffs_reconstruct' , 'real' , 3), & + var_info_type('dcEdge' , 'real' , 1), & + var_info_type('defc_a' , 'real' , 2), & + var_info_type('defc_b' , 'real' , 2), & + var_info_type('deriv_two' , 'real' , 3), & + var_info_type('dss' , 'real' , 2), & + var_info_type('dvEdge' , 'real' , 1), & + var_info_type('dzu' , 'real' , 1), & + var_info_type('edgeNormalVectors' , 'real' , 2), & + var_info_type('edgesOnCell' , 'integer' , 2), & + var_info_type('edgesOnEdge' , 'integer' , 2), & + var_info_type('edgesOnVertex' , 'integer' , 2), & + var_info_type('fEdge' , 'real' , 1), & + var_info_type('fVertex' , 'real' , 1), & + var_info_type('fzm' , 'real' , 1), & + var_info_type('fzp' , 'real' , 1), & + var_info_type('indexToCellID' , 'integer' , 1), & + var_info_type('indexToEdgeID' , 'integer' , 1), & + var_info_type('indexToVertexID' , 'integer' , 1), & + var_info_type('kiteAreasOnVertex' , 'real' , 2), & + var_info_type('latCell' , 'real' , 1), & + var_info_type('latEdge' , 'real' , 1), & + var_info_type('latVertex' , 'real' , 1), & + var_info_type('localVerticalUnitVectors' , 'real' , 2), & 
+ var_info_type('lonCell' , 'real' , 1), & + var_info_type('lonEdge' , 'real' , 1), & + var_info_type('lonVertex' , 'real' , 1), & + var_info_type('meshDensity' , 'real' , 1), & + var_info_type('nEdgesOnCell' , 'integer' , 1), & + var_info_type('nEdgesOnEdge' , 'integer' , 1), & + var_info_type('nominalMinDc' , 'real' , 0), & + var_info_type('qv_init' , 'real' , 1), & + var_info_type('rdzu' , 'real' , 1), & + var_info_type('rdzw' , 'real' , 1), & + var_info_type('t_init' , 'real' , 2), & + var_info_type('u_init' , 'real' , 1), & + var_info_type('v_init' , 'real' , 1), & + var_info_type('verticesOnCell' , 'integer' , 2), & + var_info_type('verticesOnEdge' , 'integer' , 2), & + var_info_type('weightsOnEdge' , 'real' , 2), & + var_info_type('xCell' , 'real' , 1), & + var_info_type('xEdge' , 'real' , 1), & + var_info_type('xVertex' , 'real' , 1), & + var_info_type('yCell' , 'real' , 1), & + var_info_type('yEdge' , 'real' , 1), & + var_info_type('yVertex' , 'real' , 1), & + var_info_type('zCell' , 'real' , 1), & + var_info_type('zEdge' , 'real' , 1), & + var_info_type('zVertex' , 'real' , 1), & + var_info_type('zb' , 'real' , 3), & + var_info_type('zb3' , 'real' , 3), & + var_info_type('zgrid' , 'real' , 2), & + var_info_type('zxu' , 'real' , 2), & + var_info_type('zz' , 'real' , 2) & + ] + + ! Whether a variable should be in input or restart can be determined by looking at + ! the `atm_init_coupled_diagnostics` subroutine in MPAS. + ! If a variable first appears on the LHS of an equation, it should be in restart. + ! If a variable first appears on the RHS of an equation, it should be in input. + ! The remaining ones of interest should be in output. + + !> ######################################################################################### + !> This list corresponds to the "input" stream in MPAS registry. + !> It consists of variables that are members of the "diag" and "state" struct. + !> Only variables that are specific to the "input" stream are included. 
+ !> ######################################################################################### + type(var_info_type), parameter :: input_var_info_list(*) = [ & + var_info_type('Time' , 'real' , 0), & + var_info_type('initial_time' , 'character' , 0), & + var_info_type('rho' , 'real' , 2), & + var_info_type('rho_base' , 'real' , 2), & + var_info_type('scalars' , 'real' , 3), & + var_info_type('theta' , 'real' , 2), & + var_info_type('theta_base' , 'real' , 2), & + var_info_type('u' , 'real' , 2), & + var_info_type('w' , 'real' , 2), & + var_info_type('xtime' , 'character' , 0) & + ] + + !> ######################################################################################### + !> This list corresponds to the "restart" stream in MPAS registry. + !> It consists of variables that are members of the "diag" and "state" struct. + !> Only variables that are specific to the "restart" stream are included. + !> ######################################################################################### + type(var_info_type), parameter :: restart_var_info_list(*) = [ & + var_info_type('exner' , 'real' , 2), & + var_info_type('exner_base' , 'real' , 2), & + var_info_type('pressure_base' , 'real' , 2), & + var_info_type('pressure_p' , 'real' , 2), & + var_info_type('rho_p' , 'real' , 2), & + var_info_type('rho_zz' , 'real' , 2), & + var_info_type('rtheta_base' , 'real' , 2), & + var_info_type('rtheta_p' , 'real' , 2), & + var_info_type('ru' , 'real' , 2), & + var_info_type('ru_p' , 'real' , 2), & + var_info_type('rw' , 'real' , 2), & + var_info_type('rw_p' , 'real' , 2), & + var_info_type('theta_m' , 'real' , 2) & + ] + + !> ######################################################################################### + !> This list corresponds to the "output" stream in MPAS registry. + !> It consists of variables that are members of the "diag" struct. + !> Only variables that are specific to the "output" stream are included. 
+ !> ######################################################################################### + type(var_info_type), parameter :: output_var_info_list(*) = [ & + var_info_type('divergence' , 'real' , 2), & + var_info_type('pressure' , 'real' , 2), & + var_info_type('relhum' , 'real' , 2), & + var_info_type('surface_pressure' , 'real' , 1), & + var_info_type('uReconstructMeridional' , 'real' , 2), & + var_info_type('uReconstructZonal' , 'real' , 2), & + var_info_type('vorticity' , 'real' , 2) & + ] + + !> ######################################################################################### + !> + !> ######################################################################################### + type(core_type), pointer :: corelist => null() + type(domain_type), pointer :: domain_ptr => null() + type(mpas_Clock_type), pointer :: clock => null() + + character(StrKIND), allocatable :: constituent_name(:) + integer, allocatable :: index_constituent_to_mpas_scalar(:) + integer, allocatable :: index_mpas_scalar_to_constituent(:) + logical, allocatable :: is_water_species(:) + +contains + !> ######################################################################################### + !> Convert one or more values of any intrinsic data types to a character string for pretty + !> printing. + !> If `value` contains more than one element, the elements will be stringified, delimited by `separator`, then concatenated. + !> If `value` contains exactly one element, the element will be stringified without using `separator`. + !> If `value` contains zero element or is of unsupported data types, an empty character string is produced. + !> If `separator` is not supplied, it defaults to ", " (i.e., a comma and a space). 
+ !> (KCW, 2024-02-04) + !> Ported for UWM (DJS: 2025) + !> ######################################################################################### + pure function stringify(value, separator) + use, intrinsic :: iso_fortran_env, only: int32, int64, real32, real64 + + class(*), intent(in) :: value(:) + character(*), optional, intent(in) :: separator + character(:), allocatable :: stringify + + integer, parameter :: sizelimit = 1024 + + character(:), allocatable :: buffer, delimiter, format + character(:), allocatable :: value_c(:) + integer :: i, n, offset + + if (present(separator)) then + delimiter = separator + else + delimiter = ', ' + end if + + n = min(size(value), sizelimit) + + if (n == 0) then + stringify = '' + + return + end if + + select type (value) + type is (character(*)) + allocate(character(len(value) * n + len(delimiter) * (n - 1)) :: buffer) + + buffer(:) = '' + offset = 0 + + ! Workaround for a bug in GNU Fortran >= 12. This is perhaps the manifestation of GCC Bugzilla Bug 100819. + ! When a character string array is passed as the actual argument to an unlimited polymorphic dummy argument, + ! its array index and length parameter are mishandled. + allocate(character(len(value)) :: value_c(size(value))) + + value_c(:) = value(:) + + do i = 1, n + if (len(delimiter) > 0 .and. 
i > 1) then + buffer(offset + 1:offset + len(delimiter)) = delimiter + offset = offset + len(delimiter) + end if + + if (len_trim(adjustl(value_c(i))) > 0) then + buffer(offset + 1:offset + len_trim(adjustl(value_c(i)))) = trim(adjustl(value_c(i))) + offset = offset + len_trim(adjustl(value_c(i))) + end if + end do + + deallocate(value_c) + type is (integer(int32)) + allocate(character(11 * n + len(delimiter) * (n - 1)) :: buffer) + allocate(character(17 + len(delimiter) + floor(log10(real(n))) + 1) :: format) + + write(format, '(a, i0, 3a)') '(ss, ', n, '(i0, :, "', delimiter, '"))' + write(buffer, format) value + type is (integer(int64)) + allocate(character(20 * n + len(delimiter) * (n - 1)) :: buffer) + allocate(character(17 + len(delimiter) + floor(log10(real(n))) + 1) :: format) + + write(format, '(a, i0, 3a)') '(ss, ', n, '(i0, :, "', delimiter, '"))' + write(buffer, format) value + type is (logical) + allocate(character(1 * n + len(delimiter) * (n - 1)) :: buffer) + allocate(character(13 + len(delimiter) + floor(log10(real(n))) + 1) :: format) + + write(format, '(a, i0, 3a)') '(', n, '(l1, :, "', delimiter, '"))' + write(buffer, format) value + type is (real(real32)) + allocate(character(13 * n + len(delimiter) * (n - 1)) :: buffer) + + if (maxval(abs(value)) < 1.0e5_real32) then + allocate(character(20 + len(delimiter) + floor(log10(real(n))) + 1) :: format) + write(format, '(a, i0, 3a)') '(ss, ', n, '(f13.6, :, "', delimiter, '"))' + else + allocate(character(23 + len(delimiter) + floor(log10(real(n))) + 1) :: format) + write(format, '(a, i0, 3a)') '(ss, ', n, '(es13.6e2, :, "', delimiter, '"))' + end if + + write(buffer, format) value + type is (real(real64)) + allocate(character(13 * n + len(delimiter) * (n - 1)) :: buffer) + + if (maxval(abs(value)) < 1.0e5_real64) then + allocate(character(20 + len(delimiter) + floor(log10(real(n))) + 1) :: format) + write(format, '(a, i0, 3a)') '(ss, ', n, '(f13.6, :, "', delimiter, '"))' + else + 
allocate(character(23 + len(delimiter) + floor(log10(real(n))) + 1) :: format) + write(format, '(a, i0, 3a)') '(ss, ', n, '(es13.6e2, :, "', delimiter, '"))' + end if + + write(buffer, format) value + class default + stringify = '' + + return + end select + + stringify = trim(buffer) + end function stringify + + !> ######################################################################################### + !> Procedure to initialize UWM with MPAS dynamical core. + !> + !> ######################################################################################### + subroutine ufs_mpas_init_phase1(Cfg, time_start, time_end, total_time, calendar, logUnits) + ! MPAS + use mpas_pool_routines, only : mpas_pool_add_config, mpas_pool_get_subpool + use mpas_pool_routines, only : mpas_pool_add_dimension, mpas_pool_get_field + use mpas_pool_routines, only : mpas_pool_get_array, mpas_pool_get_config + use mpas_framework, only : mpas_framework_init_phase1, mpas_framework_init_phase2 + use mpas_domain_routines, only : mpas_allocate_domain, mpas_pool_get_dimension + use mpas_bootstrapping, only : mpas_bootstrap_framework_phase1 + use mpas_bootstrapping, only : mpas_bootstrap_framework_phase2 + use mpas_stream_inquiry, only : mpas_stream_inquiry_new_streaminfo + use mpas_derived_types, only : mpas_pool_type, mpas_IO_NETCDF, field3dReal + use mpas_kind_types, only : StrKIND, RKIND + use mpas_log, only : mpas_log_write + use atm_core_interface, only : atm_setup_core, atm_setup_domain + use mpas_constants, only : mpas_constants_compute_derived, pi => pii + use mpas_attlist, only : mpas_add_att + ! FMS + use field_manager_mod, only : MODEL_ATMOS + use fms2_io_mod, only : file_exists + use mpp_mod, only : FATAL, mpp_error + ! PIO + use pio, only : pio_global, pio_get_att + ! Arguments + type(mpas_control_type), intent(inout) :: Cfg + integer, intent(in ) :: time_start(6), time_end(6), logUnits(2) + integer, intent(in ) :: total_time + character(17), intent(in ) :: calendar + ! 
Locals + character(len=*), parameter :: subname = 'ufs_mpas_subdriver::ufs_mpas_init_phase1' + integer :: i, ndate1, ndate2, tod, ierr, ik, kk + type (mpas_pool_type), pointer :: state, mesh, tend + type (field3dReal), pointer :: scalarsField + character (len=StrKIND), pointer :: initial_time, config_start_time + integer, pointer :: num_scalars + + ! Setup MPAS infrastructure + allocate(corelist, stat=ierr) + if ( ierr /= 0 ) call mpp_error(FATAL,subname//": failed to allocate corelist array") + nullify(corelist % next) + + allocate(corelist % domainlist, stat=ierr) + if ( ierr /= 0 ) call mpp_error(FATAL,subname//": failed to allocate corelist%domainlist%next") + nullify(corelist % domainlist % next) + + domain_ptr => corelist % domainlist + domain_ptr % core => corelist + + call mpas_allocate_domain(domain_ptr) + domain_ptr % domainID = 0 + + ! Initialize MPAS infrastructure + call mpas_framework_init_phase1(domain_ptr % dminfo, external_comm=fcst_mpi_comm) + + call atm_setup_core(corelist) + call atm_setup_domain(domain_ptr) + + ! Set up the log manager as early as possible so we can use it for any errors/messages + ! during subsequent init steps. We need: + ! 1) domain_ptr to be allocated, + ! 2) dmpar_init complete to access dminfo, + ! 3) *_setup_core to assign the setup_log function pointer + domain_ptr % core % git_version = 'unknown' + domain_ptr % core % build_target = 'N/A' + ierr = domain_ptr % core % setup_log(domain_ptr % logInfo, domain_ptr, unitNumbers=logUnits) + if ( ierr /= 0 ) then + call mpp_error(FATAL,subname//": Log setup failed for MPAS-A dycore") + end if + + ! Read MPAS namelist. + if (file_exists('input.nml')) then + call read_mpas_namelist('input.nml', domain_ptr % configs, Cfg % mpi_comm, Cfg % master, Cfg % me) + else + call mpp_error(FATAL,subname//": Cannot find MPAS namelist file, input.nml") + end if + + ! 
Set forecast start time (config_start_time)
+ ndate1 = time_start(1)*10000 + time_start(2)*100 + time_start(3)
+ tod = time_start(4)*3600 + time_start(5)*60 + time_start(6)
+ call mpas_pool_add_config(domain_ptr % configs, 'config_start_time', date2yyyymmdd(ndate1)//'_'//sec2hms(tod))
+ call mpas_log_write('config_start_time = '//date2yyyymmdd(ndate1)//'_'//sec2hms(tod))
+
+ ! Set forecast end time (config_stop_time)
+ ndate2 = time_end(1)*10000 + time_end(2)*100 + time_end(3)
+ tod = time_end(4)*3600 + time_end(5)*60 + time_end(6)
+ call mpas_pool_add_config(domain_ptr % configs, 'config_stop_time', date2yyyymmdd(ndate2)//'_'//sec2hms(tod))
+ call mpas_log_write('config_stop_time = '//date2yyyymmdd(ndate2)//'_'//sec2hms(tod))
+
+ ! Set forecast run time (config_run_duration) #DJS2025 this is not correct. need to fix, but works for current test.
+ tod = max(ndate2 - ndate1 - 1,0)
+ call mpas_pool_add_config(domain_ptr % configs, 'config_run_duration', trim(int2str(tod))//'_'//sec2hms(total_time))
+ call mpas_log_write('config_run_duration = '//trim(int2str(tod))//'_'//sec2hms(total_time))
+
+ ! Set other MPAS required configuration information.
+ call mpas_pool_add_config(domain_ptr % configs, 'config_restart_timestamp_name', 'restart_timestamp')
+ call mpas_pool_add_config(domain_ptr % configs, 'config_IAU_option', 'off')
+ call mpas_pool_add_config(domain_ptr % configs, 'config_do_DAcycling', .false.)
+ call mpas_pool_add_config(domain_ptr % configs, 'config_halo_exch_method', 'mpas_halo')
+
+ ! Initialize MPAS infrastructure (phase 2)
+ call mpas_framework_init_phase2(domain_ptr, io_system=pio_subsystem, calendar = trim(calendar))
+
+ ! Before defining packages, initialize the stream inquiry instance for the domain
+ domain_ptr % streamInfo => mpas_stream_inquiry_new_streaminfo()
+ if (.not. 
associated(domain_ptr % streamInfo)) then + call mpp_error(FATAL,subname//": Failed to instantiate streamInfo object for "//trim(domain_ptr % core % coreName)) + end if + + ierr = domain_ptr % core % define_packages(domain_ptr % packages) + if (ierr /= 0) then + call mpp_error(FATAL,subname//": Package definition failed for "//trim(domain_ptr % core % coreName)) + end if + + ierr = domain_ptr % core % setup_packages(domain_ptr % configs, domain_ptr % streamInfo, & + domain_ptr % packages, domain_ptr % iocontext) + if (ierr /= 0) then + call mpp_error(FATAL,subname//": Package setup failed for "//trim(domain_ptr % core % coreName)) + end if + + ierr = domain_ptr % core % setup_decompositions(domain_ptr % decompositions) + if (ierr /= 0) then + call mpp_error(FATAL,subname//": Decomposition setup failed for "//trim(domain_ptr % core % coreName)) + end if + + ierr = domain_ptr % core % setup_clock(domain_ptr % clock, domain_ptr % configs) + if (ierr /= 0) then + call mpp_error(FATAL,subname//": Clock setup failed for "//trim(domain_ptr % core % coreName)) + end if + + ! Adding a config named 'cam_pcnst' with the number of constituents will indicate to + ! MPAS-A setup code that it is operating as a UFS dycore, and that it is necessary to + ! allocate scalars separately from other Registry-defined fields + call mpas_pool_add_config(domain_ptr % configs, 'cam_pcnst', Cfg % nConstituents) + + ! Call MPAS framework bootstrap phase 1 + call mpas_bootstrap_framework_phase1(domain_ptr, "external mesh file", mpas_IO_NETCDF, pio_file_desc=pioid) + + ! Finalize the setup of blocks and fields + call mpas_bootstrap_framework_phase2(domain_ptr, pio_file_desc=pioid) + + ! Add num_scalars from "state" pool to "dimensions". 
+ call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state) + call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) + call mpas_pool_add_dimension(domain_ptr % blocklist % dimensions, 'num_scalars', num_scalars) + nullify(num_scalars) + call mpas_pool_add_dimension(state, 'index_qv', 1) + call mpas_pool_add_dimension(state, 'moist_start', 1) + call mpas_pool_add_dimension(state, 'moist_end', Cfg % nwat) + + ! Read in static (invariant) data + call dyn_mpas_read_write_stream( 'r', 'invariant') + + ! Compute unit vectors giving the local north and east directions as well as + ! the unit normal vector for edges + call ufs_mpas_compute_unit_vectors() + + ! Access dimensions that are made public via this module + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'mesh', mesh) + call mpas_pool_get_dimension(mesh, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(mesh, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(mesh, 'nVerticesSolve', nVerticesSolve) + call mpas_pool_get_dimension(mesh, 'nVertLevels', nVertLevelsSolve) ! MPAS always solves over the full column + + ! Read the global sphere_radius attribute. This is needed to normalize the cell areas. + ierr = pio_get_att(pioid, pio_global, 'sphere_radius', sphere_radius) + if( ierr /= 0 ) then + call mpp_error(FATAL,subname//": Could not find sphere_radius PIO attribute") + endif + + ! Query global grid dimensions from MPAS + call ufs_mpas_get_global_dims(nCellsGlobal, nEdgesGlobal, nVerticesGlobal, maxEdges, nVertLevels, maxNCells) + + ! Setup constants + call mpas_constants_compute_derived() + + ! Set MPAS mesh lon/lat/area. 
+ allocate(latCellGlobal(nCellsGlobal), lonCellGlobal(nCellsGlobal), areaCellGlobal(nCellsGlobal)) + call ufs_mpas_get_global_coords(latCellGlobal, lonCellGlobal, areaCellGlobal) + + end subroutine ufs_mpas_init_phase1 + + !> ######################################################################################## + !> Procedure to initialize UWM with MPAS dynamical core. + !> + !> ######################################################################################## + subroutine ufs_mpas_init_phase2(Cfg) + use mpas_kind_types, only : StrKIND, RKIND + use mpas_derived_types, only : mpas_pool_type, mpas_Time_Type, field0DReal, field2dreal + use mpas_domain_routines, only : mpas_pool_get_dimension + use mpas_pool_routines, only : mpas_pool_get_subpool + use mpas_pool_routines, only : mpas_pool_initialize_time_levels, mpas_pool_get_config + use mpas_pool_routines, only : mpas_pool_get_array, mpas_pool_get_field + use mpas_atm_dimensions, only : mpas_atm_set_dims + use mpas_atm_threading, only : mpas_atm_threading_init + use mpp_mod, only : FATAL, mpp_error + use mpas_atm_halos, only : atm_build_halo_groups, exchange_halo_group + use atm_core, only : atm_mpas_init_block, core_clock => clock + use atm_time_integration, only : mpas_atm_dynamics_init + use mpas_timekeeping, only : mpas_get_clock_time, mpas_get_time, mpas_START_TIME + use mpas_log, only : mpas_log_write + use mpas_attlist, only : mpas_modify_att + use mpas_string_utils, only : mpas_string_replace + use mpas_field_routines, only : mpas_allocate_scratch_field + ! Arguments + type(mpas_control_type), intent(inout) :: Cfg + type(mpas_pool_type), pointer :: tend_physics_pool + ! 
Locals + character(len=*), parameter :: subname = 'ufs_mpas_subdriver::ufs_mpas_init_phase2' + type (mpas_pool_type), pointer :: state, mesh + integer :: ierr + integer, pointer :: nVertLevels1, maxEdges1, maxEdges2, num_scalars + real (kind=RKIND), pointer :: dt + logical, pointer :: config_do_restart + type (mpas_Time_Type) :: startTime + character(len=StrKIND) :: startTimeStamp + character (len=StrKIND), pointer :: xtime + character (len=StrKIND), pointer :: initial_time1, initial_time2 + type(field0dreal), pointer :: field_0d_real + type(field2dreal), pointer :: field_2d_real + + ! + ! Setup threading + ! + call mpas_log_write('Setting up OpenMP threading') + call mpas_atm_threading_init(domain_ptr%blocklist, ierr) + if ( ierr /= 0 ) then + call mpp_error(FATAL,subname//": Threading setup failed for core "//trim(domain_ptr % core % coreName)) + end if + + ! + ! Set up inner dimensions used by arrays in optimized dynamics routines + ! + call mpas_log_write('Setting up dimensions') + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state) + call mpas_pool_get_dimension(state, 'nVertLevels', nVertLevels1) + call mpas_pool_get_dimension(state, 'maxEdges', maxEdges1) + call mpas_pool_get_dimension(state, 'maxEdges2', maxEdges2) + call mpas_pool_get_dimension(state, 'num_scalars', num_scalars) + + call mpas_atm_set_dims(nVertLevels1, maxEdges1, maxEdges2, num_scalars) + Cfg % levs = nVertLevels1 + + ! + ! Set "local" clock to point to the clock contained in the domain type + ! + clock => domain_ptr % clock + core_clock => domain_ptr % clock + + ! + ! Build halo exchange groups and set method for exchanging halos in a group + ! + call mpas_log_write('Building halo exchange groups.') + + nullify(exchange_halo_group) + call atm_build_halo_groups(domain_ptr, ierr) + + if (ierr /= 0) then + call mpp_error(FATAL,subname//": failed to build MPAS-A halo exchange groups.") + end if + if (.not. 
associated(exchange_halo_group)) then + call mpp_error(FATAL,subname//": failed to build MPAS-A halo exchange groups.") + end if + + ! Variables in MPAS "state" pool have more than one time level. Copy the values from the first time level of + ! such variables into all subsequent time levels to initialize them. + call mpas_pool_get_config(domain_ptr % blocklist % configs, 'config_do_restart', config_do_restart) + call mpas_pool_get_config(domain_ptr % blocklist % configs, 'config_dt', dt) + + if (.not. config_do_restart) then + call mpas_log_write('Initializing time levels') + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state) + call mpas_pool_initialize_time_levels(state) + nullify(state) + end if + nullify (config_do_restart) + + call exchange_halo_group(domain_ptr, 'initialization:u',ierr=ierr) + if ( ierr /= 0 ) then + call mpp_error(FATAL,subname//'Failed to exchange halo layers for group "initialization:u"') + end if + + call mpas_log_write('Initializing atmospheric variables') + + ! How many calls to MPAS dycore for each ATMosphere time step? + Cfg%dt_dycore = dt + n_atmos = dt_atmos/dt + + ! + ! Set startTimeStamp based on the start time of the simulation clock + ! 
+ startTime = mpas_get_clock_time(clock, mpas_START_TIME, ierr) + if ( ierr /= 0 ) then + call mpp_error(FATAL,subname//': Failed to get clock_time "mpas_START_TIME"') + end if + call mpas_get_time(startTime, dateTimeString=startTimeStamp, ierr=ierr) + if ( ierr /= 0 ) then + call mpp_error(FATAL,subname//': Failed to get time mpas_START_TIME"') + end if + + + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'mesh', mesh) + !call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state) + + call atm_mpas_init_block(domain_ptr % dminfo, domain_ptr % streamManager, domain_ptr % blocklist, mesh, dt) + + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state) + call mpas_pool_get_array(state, 'xtime', xtime, timelevel=1) + xtime = startTimeStamp + + ! Initialize initial_time in second time level. We need to do this because initial state + ! is read into time level 1, and if we write output from the set of state arrays that + ! represent the original time level 2, the initial_time field will be invalid. + call mpas_pool_get_array(state, 'initial_time', initial_time1, timelevel=1) + call mpas_pool_get_array(state, 'initial_time', initial_time2, timelevel=2) + initial_time2 = initial_time1 + + ! + ! Set time units to CF-compliant "seconds since ". + ! + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state) + call mpas_pool_get_field(state, 'Time', field_0d_real, timelevel=1) + + if (.not. 
associated(field_0d_real)) then + call mpp_error(FATAL,subname//'Failed to find variable "Time"') + end if + + call mpas_modify_att(field_0d_real % attlists(1) % attlist, 'units', & + 'seconds since ' // mpas_string_replace(initial_time1, '_', ' '), ierr=ierr) + + if (ierr /= 0) then + call mpp_error(FATAL,subname//'Failed to set time units') + end if + + call exchange_halo_group(domain_ptr, 'initialization:pv_edge,ru,rw',ierr=ierr) + if ( ierr /= 0 ) then + call mpp_error(FATAL,subname//'Failed to exchange halo layers for group "initialization:ru,rw"') + end if + + ! + ! Prepare the dynamics for integration + ! + call mpas_log_write('Initializing the dynamics') + call mpas_atm_dynamics_init(domain_ptr) + + ! + ! Some additional "scratch" fields are needed for interoperability with CAM-SIMA, but they are not initialized by + ! `mpas_atm_dynamics_init`. Initialize them below. + ! +! call mpas_pool_get_field(domain_ptr % blocklist % allfields, 'tend_uzonal', field_2d_real, timelevel=1) +! call mpas_allocate_scratch_field(field_2d_real) +! nullify(field_2d_real) + +! call mpas_pool_get_field(domain_ptr % blocklist % allfields, 'tend_umerid', field_2d_real, timelevel=1) +! call mpas_allocate_scratch_field(field_2d_real) +! nullify(field_2d_real) + + call mpas_log_write('Successful initialization of MPAS dynamical core') + + end subroutine ufs_mpas_init_phase2 + + !> ######################################################################################### + !> Routine to call MPAS dynamical core + !> Loop over dynamical time-step(s) and increment MPAS state (timelevel 1->2) + !> + !> ######################################################################################### + subroutine ufs_mpas_run() + ! 
MPAS + use atm_core, only : atm_do_timestep, atm_compute_output_diagnostics + use mpas_domain_routines, only : mpas_pool_get_dimension + use mpas_derived_types, only : mpas_Time_type, mpas_pool_type, MPAS_TimeInterval_type + use mpas_kind_types, only : StrKIND, RKIND, R8KIND + use mpas_constants, only : rvord + use mpas_pool_routines, only : mpas_pool_get_config, mpas_pool_get_subpool + use mpas_pool_routines, only : mpas_pool_shift_time_levels, mpas_pool_get_array + use mpas_log, only : mpas_log_write + use mpas_timer, only : mpas_timer_start, mpas_timer_stop + use mpas_timekeeping, only : mpas_advance_clock, mpas_get_clock_time, mpas_get_time + use mpas_timekeeping, only : mpas_NOW, mpas_is_clock_stop_time, mpas_dmpar_get_time + use mpas_timekeeping, only : mpas_set_timeInterval, operator(+), operator(<) + ! FMS + use mpp_mod, only : FATAL, mpp_error + ! Locals + character(len=*), parameter :: subname = 'ufs_mpas_run::ufs_mpas_run' + real (kind=RKIND), pointer :: config_dt + type (mpas_pool_type), pointer :: state, diag, mesh + type (mpas_Time_type) :: timeNow, timeStop + character(len=StrKIND) :: timeStamp + integer :: ierr, itime, itimestep + integer, pointer :: index_qv + integer, pointer :: nCellsSolve + real(kind=RKIND), dimension(:,:), pointer :: theta_m, rho_zz, zz, theta, rho + real(kind=RKIND), dimension(:,:,:), pointer :: scalars + real (kind=R8KIND) :: integ_start_time, integ_stop_time + logical, pointer :: config_apply_lbcs + type(mpas_timeinterval_type) :: mpas_time_interval + + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state) + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'diag', diag) + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'mesh', mesh) + + ! 
Eventually, dt should be domain specific + call mpas_pool_get_config( domain_ptr % blocklist % configs, 'config_dt', config_dt) + call MPAS_set_timeInterval(mpas_time_interval, S=dt_atmos, ierr=ierr) + if (ierr /= 0) then + call mpp_error(FATAL,subname//'Failed to set dynamics time step') + endif + + ! + ! Read initial boundary state + ! NOT YET IMPLEMENTED (Follow src/core_atmosphere/mpas_atm_core.F:atm_core_run()) + ! + call mpas_pool_get_config( domain_ptr % blocklist % configs, 'config_apply_lbcs', config_apply_lbcs) + if (config_apply_lbcs) then + + endif + + ! During integration, time level 1 stores the model state at the beginning of the + ! time step, and time level 2 stores the state advanced config_dt in time by timestep(...) + timeNow = mpas_get_clock_time(clock, mpas_NOW, ierr) + if (ierr /= 0) then + call mpp_error(FATAL,subname//': Failed to get clock_time for "mpas_NOW"') + endif + + timeStop = timeNow + mpas_time_interval + itimestep = 0 + do while (itimestep < 1)!(timeNow < timeStop) !DJS2025: Only one dycore inte + itimestep = itimestep + 1 + ! + call mpas_get_time(curr_time=timeNow, dateTimeString=timeStamp, ierr=ierr) + if ( ierr /= 0 ) then + call mpp_error(FATAL,subname//': Failed to get time mpas_NOW"') + end if + call mpas_log_write('') + call mpas_log_write(' MPAS dynamics start timestep '//trim(timeStamp)) + + ! Integrate forward one dycore time step + call mpas_timer_start('time integration') + call mpas_dmpar_get_time(integ_start_time) + call atm_do_timestep(domain_ptr, config_dt, itimestep) + call mpas_dmpar_get_time(integ_stop_time) + call mpas_timer_stop('time integration') + !call mpas_log_write(' Timing for integration step: $r s', realArgs=(/real(integ_stop_time - integ_start_time, kind=RKIND)/)) + + ! Move time level 2 fields back into time level 1 for next time step + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state) + call mpas_pool_shift_time_levels(state) + + ! Advance clock. 
+ call mpas_advance_clock(clock) + timeNow = mpas_get_clock_time(clock, mpas_NOW, ierr) + if (ierr /= 0) then + call mpp_error(FATAL,subname//': Failed to get clock_time for "mpas_NOW"') + endif + + end do + + ! + ! Compute diagnostic fields from the final prognostic state + ! + call atm_compute_output_diagnostics(state, 1, diag, mesh) + + end subroutine ufs_mpas_run + + + !> ######################################################################################### + !> Procedure to open MPAS IC file. + !> + !> ######################################################################################### + subroutine ufs_mpas_open_init() + ! PIO + use pio, only : pio_openfile, pio_nowrite + ! FMS + use fms2_io_mod, only : file_exists + use mpp_mod, only : FATAL, mpp_error + ! Arguments + ! Locals + integer :: ierr + character(len=*), parameter :: subname = 'ufs_mpas_subdriver::ufs_mpas_open_init' + + ! Open MPAS Initial Condition file. + if (file_exists(ic_filename)) then + ierr = pio_openfile(pio_subsystem, pioid, pio_iotype, ic_filename, pio_nowrite) + if (ierr /= 0) then + call mpp_error(FATAL,subname//": Failed opening MPAS IC File, "//trim(ic_filename)) + end if + else + call mpp_error(FATAL,subname//": Cannot find MPAS IC file: "//trim(ic_filename)) + end if + end subroutine ufs_mpas_open_init + + !> ######################################################################################### + !> Procedure to read MPAS namelist(s). + !> + !> The namelist for MPAS are described in MPAS-Model/src/core_atmosphere/Registry.xml, this + !> is also where the default values defined below originate. 
+ !> + !> ######################################################################################### + subroutine read_mpas_namelist(nml_file, configPool, mpicomm, master, me) + use mpi_f08, only: MPI_Comm, MPI_CHARACTER, MPI_INTEGER, MPI_REAL8, MPI_LOGICAL + use mpi_f08, only: mpi_bcast, mpi_barrier + use mpas_derived_types, only: mpas_pool_type + use mpas_kind_types, only: StrKIND, RKIND + use mpas_pool_routines, only: mpas_pool_add_config + use mpas_log, only : mpas_log_write + use mpas_typedefs, only: r8 => kind_dbl_prec + use fms_mod, only: check_nml_error + use mpp_mod, only: input_nml_file + ! Inputs + type(MPI_Comm), intent(in ) :: mpicomm + integer, intent(in ) :: master, me + character(len=*), intent(in ) :: nml_file + type(mpas_pool_type), intent(inout) :: configPool + + ! Namelist nhyd_model + character (len=StrKIND) :: mpas_time_integration = 'SRK3' + integer :: mpas_time_integration_order = 2 + real(r8) :: mpas_dt = 720.0_r8 + logical :: mpas_split_dynamics_transport = .true. + integer :: mpas_number_of_sub_steps = 2 + integer :: mpas_dynamics_split_steps = 3 + real(r8) :: mpas_h_mom_eddy_visc2 = 0.0_r8 + real(r8) :: mpas_h_mom_eddy_visc4 = 0.0_r8 + real(r8) :: mpas_v_mom_eddy_visc2 = 0.0_r8 + real(r8) :: mpas_h_theta_eddy_visc2 = 0.0_r8 + real(r8) :: mpas_h_theta_eddy_visc4 = 0.0_r8 + real(r8) :: mpas_v_theta_eddy_visc2 = 0.0_r8 + character (len=StrKIND) :: mpas_horiz_mixing = '2d_smagorinsky' + real(r8) :: mpas_len_disp = 120000.0_r8 + real(r8) :: mpas_visc4_2dsmag = 0.05_r8 + real(r8) :: mpas_del4u_div_factor = 10.0_r8 + integer :: mpas_w_adv_order = 3 + integer :: mpas_theta_adv_order = 3 + integer :: mpas_scalar_adv_order = 3 + integer :: mpas_u_vadv_order = 3 + integer :: mpas_w_vadv_order = 3 + integer :: mpas_theta_vadv_order = 3 + integer :: mpas_scalar_vadv_order = 3 + logical :: mpas_scalar_advection = .true. + logical :: mpas_positive_definite = .false. + logical :: mpas_monotonic = .true. 
+ real(r8) :: mpas_coef_3rd_order = 0.25_r8 + real(r8) :: mpas_smagorinsky_coef = 0.125_r8 + logical :: mpas_mix_full = .true. + real(r8) :: mpas_epssm = 0.1_r8 + real(r8) :: mpas_smdiv = 0.1_r8 + real(r8) :: mpas_apvm_upwinding = 0.5_r8 + logical :: mpas_h_ScaleWithMesh = .true. + ! Namelist damping + real(r8) :: mpas_zd = 22000.0_r8 + real(r8) :: mpas_xnutr = 0.2_r8 + real(r8) :: mpas_cam_coef = 0.0_r8 + integer :: mpas_cam_damping_levels = 0 + logical :: mpas_rayleigh_damp_u = .true. + real(r8) :: mpas_rayleigh_damp_u_timescale_days = 5.0_r8 + integer :: mpas_number_rayleigh_damp_u_levels = 3 + ! Namelist limited_area + logical :: mpas_apply_lbcs = .false. + ! Namelist PIO + integer :: mpas_pio_num_iotasks = 1 + integer :: mpas_pio_stride = 1 + ! Namelist assimilation + logical :: mpas_jedi_da = .false. + ! Namelist decomposition + character (len=StrKIND) :: mpas_block_decomp_file_prefix = 'x1.40962.graph.info.part.' + ! Namelist restart + logical :: mpas_do_restart = .false. + ! Namelist printout + logical :: mpas_print_global_minmax_vel = .true. + logical :: mpas_print_detailed_minmax_vel = .true. + logical :: mpas_print_global_minmax_sca = .true. + + namelist /mpas_nhyd_model/ mpas_time_integration, mpas_time_integration_order, mpas_dt, & + mpas_split_dynamics_transport, mpas_number_of_sub_steps, mpas_dynamics_split_steps, & + mpas_h_mom_eddy_visc2, mpas_h_mom_eddy_visc4, mpas_v_mom_eddy_visc2, & + mpas_h_theta_eddy_visc2, mpas_h_theta_eddy_visc4, mpas_v_theta_eddy_visc2, & + mpas_horiz_mixing, mpas_len_disp, mpas_visc4_2dsmag, mpas_del4u_div_factor, & + mpas_w_adv_order, mpas_theta_adv_order, mpas_scalar_adv_order, mpas_u_vadv_order, & + mpas_w_vadv_order, mpas_theta_vadv_order, mpas_scalar_vadv_order, & + mpas_scalar_advection, mpas_positive_definite, mpas_monotonic, mpas_coef_3rd_order, & + mpas_smagorinsky_coef, mpas_mix_full, mpas_epssm, mpas_smdiv, mpas_apvm_upwinding, & + mpas_h_ScaleWithMesh + ! 
+ namelist /mpas_damping/ mpas_zd, mpas_xnutr, mpas_cam_coef, mpas_cam_damping_levels, & + mpas_rayleigh_damp_u, mpas_rayleigh_damp_u_timescale_days, & + mpas_number_rayleigh_damp_u_levels + ! + namelist /mpas_limited_area/ mpas_apply_lbcs + ! + namelist /mpas_io/ mpas_pio_num_iotasks, mpas_pio_stride + ! + namelist /mpas_assimilation/ mpas_jedi_da + ! + namelist /mpas_decomposition/ mpas_block_decomp_file_prefix + ! + namelist /mpas_restart/ mpas_do_restart + ! + namelist /mpas_printout/ mpas_print_global_minmax_vel, mpas_print_detailed_minmax_vel, & + mpas_print_global_minmax_sca + + ! These configuration parameters must be set in the MPAS configPool, but can't be changed + ! in UFS. *From CAM src/dynamics/mpas/dyn_comp.F90* + integer :: config_num_halos = 2 + integer :: config_number_of_blocks = 0 + logical :: config_explicit_proc_decomp = .false. + character(len=StrKIND) :: config_proc_decomp_file_prefix = 'graph.info.part' + real(RKIND) :: config_relax_zone_divdamp_coef = 6 + + ! Locals + integer :: ierr, io, mpierr + + ! Read in namelists... + if (me == master) then + call mpas_log_write('Reading MPAS-A dynamical core namelist') + ! nhyd_model + read(input_nml_file, nml=mpas_nhyd_model, iostat=io) + ierr = check_nml_error(io, 'mpas_nhyd_model') + ! damping + read(input_nml_file, nml=mpas_damping, iostat=io) + ierr = check_nml_error(io, 'mpas_damping') + ! limited_area + read(input_nml_file, nml=mpas_limited_area, iostat=io) + ierr = check_nml_error(io, 'mpas_limited_area') + ! PIO + read(input_nml_file, nml=mpas_io, iostat=io) + ierr = check_nml_error(io, 'mpas_io') + ! assimilation + read(input_nml_file, nml=mpas_assimilation, iostat=io) + ierr = check_nml_error(io, 'mpas_assimilation') + ! decomposition + read(input_nml_file, nml=mpas_decomposition, iostat=io) + ierr = check_nml_error(io, 'mpas_decomposition') + ! restart + read(input_nml_file, nml=mpas_restart, iostat=io) + ierr = check_nml_error(io, 'mpas_restart') + ! 
printout + read(input_nml_file, nml=mpas_printout, iostat=io) + ierr = check_nml_error(io, 'mpas_printout') + endif + + ! Other processors waiting... + call mpi_barrier(mpicomm, mpierr) + + ! + ! MPI Broadcast to all + ! + call mpi_bcast(mpas_time_integration, StrKIND, mpi_character, master, mpicomm, mpierr) + call mpi_bcast(mpas_time_integration_order, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_dt, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_split_dynamics_transport, 1, mpi_logical, master, mpicomm, mpierr) + call mpi_bcast(mpas_number_of_sub_steps, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_dynamics_split_steps, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_h_mom_eddy_visc2, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_h_mom_eddy_visc4, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_v_mom_eddy_visc2, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_h_theta_eddy_visc2, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_h_theta_eddy_visc4, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_v_theta_eddy_visc2, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_horiz_mixing, StrKIND, mpi_character, master, mpicomm, mpierr) + call mpi_bcast(mpas_len_disp, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_visc4_2dsmag, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_del4u_div_factor, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_w_adv_order, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_theta_adv_order, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_scalar_adv_order, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_u_vadv_order, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_w_vadv_order, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_theta_vadv_order, 1, mpi_integer, master, mpicomm, mpierr) + call 
mpi_bcast(mpas_scalar_vadv_order, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_scalar_advection, 1, mpi_logical, master, mpicomm, mpierr) + call mpi_bcast(mpas_positive_definite, 1, mpi_logical, master, mpicomm, mpierr) + call mpi_bcast(mpas_monotonic, 1, mpi_logical, master, mpicomm, mpierr) + call mpi_bcast(mpas_coef_3rd_order, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_smagorinsky_coef, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_mix_full, 1, mpi_logical, master, mpicomm, mpierr) + call mpi_bcast(mpas_epssm, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_smdiv, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_apvm_upwinding, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_h_ScaleWithMesh, 1, mpi_logical, master, mpicomm, mpierr) + ! + call mpi_bcast(mpas_zd, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_xnutr, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_cam_coef, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_cam_damping_levels, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_rayleigh_damp_u, 1, mpi_logical, master, mpicomm, mpierr) + call mpi_bcast(mpas_rayleigh_damp_u_timescale_days, 1, mpi_real8, master, mpicomm, mpierr) + call mpi_bcast(mpas_number_rayleigh_damp_u_levels, 1, mpi_integer, master, mpicomm, mpierr) + ! + call mpi_bcast(mpas_apply_lbcs, 1, mpi_logical, master, mpicomm, mpierr) + ! + call mpi_bcast(mpas_pio_num_iotasks, 1, mpi_integer, master, mpicomm, mpierr) + call mpi_bcast(mpas_pio_stride, 1, mpi_integer, master, mpicomm, mpierr) + ! + call mpi_bcast(mpas_jedi_da, 1, mpi_logical, master, mpicomm, mpierr) + ! + call mpi_bcast(mpas_block_decomp_file_prefix, StrKIND, mpi_character, master, mpicomm, mpierr) + ! + call mpi_bcast(mpas_do_restart, 1, mpi_logical, master, mpicomm, mpierr) + ! 
+ call mpi_bcast(mpas_print_global_minmax_vel, 1, mpi_logical, master, mpicomm, mpierr) + call mpi_bcast(mpas_print_detailed_minmax_vel, 1, mpi_logical, master, mpicomm, mpierr) + call mpi_bcast(mpas_print_global_minmax_sca, 1, mpi_logical, master, mpicomm, mpierr) + + ! + ! Set MPAS configuration information pool variables + ! + call mpas_pool_add_config(configPool, 'config_time_integration', mpas_time_integration) + call mpas_pool_add_config(configPool, 'config_time_integration_order', mpas_time_integration_order) + call mpas_pool_add_config(configPool, 'config_dt', real(mpas_dt,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_split_dynamics_transport', mpas_split_dynamics_transport) + call mpas_pool_add_config(configPool, 'config_number_of_sub_steps', mpas_number_of_sub_steps) + call mpas_pool_add_config(configPool, 'config_dynamics_split_steps', mpas_dynamics_split_steps) + call mpas_pool_add_config(configPool, 'config_h_mom_eddy_visc2', real(mpas_h_mom_eddy_visc2,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_h_mom_eddy_visc4', real(mpas_h_mom_eddy_visc4,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_v_mom_eddy_visc2', real(mpas_v_mom_eddy_visc2,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_h_theta_eddy_visc2', real(mpas_h_theta_eddy_visc2,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_h_theta_eddy_visc4', real(mpas_h_theta_eddy_visc4,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_v_theta_eddy_visc2', real(mpas_v_theta_eddy_visc2,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_horiz_mixing', mpas_horiz_mixing) + call mpas_pool_add_config(configPool, 'config_len_disp', real(mpas_len_disp,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_visc4_2dsmag', real(mpas_visc4_2dsmag,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_del4u_div_factor', real(mpas_del4u_div_factor,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_w_adv_order', 
mpas_w_adv_order) + call mpas_pool_add_config(configPool, 'config_theta_adv_order', mpas_theta_adv_order) + call mpas_pool_add_config(configPool, 'config_scalar_adv_order', mpas_scalar_adv_order) + call mpas_pool_add_config(configPool, 'config_u_vadv_order', mpas_u_vadv_order) + call mpas_pool_add_config(configPool, 'config_w_vadv_order', mpas_w_vadv_order) + call mpas_pool_add_config(configPool, 'config_theta_vadv_order', mpas_theta_vadv_order) + call mpas_pool_add_config(configPool, 'config_scalar_vadv_order', mpas_scalar_vadv_order) + call mpas_pool_add_config(configPool, 'config_scalar_advection', mpas_scalar_advection) + call mpas_pool_add_config(configPool, 'config_positive_definite', mpas_positive_definite) + call mpas_pool_add_config(configPool, 'config_monotonic', mpas_monotonic) + call mpas_pool_add_config(configPool, 'config_coef_3rd_order', real(mpas_coef_3rd_order,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_smagorinsky_coef', real(mpas_smagorinsky_coef,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_mix_full', mpas_mix_full) + call mpas_pool_add_config(configPool, 'config_epssm', real(mpas_epssm,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_smdiv', real(mpas_smdiv,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_apvm_upwinding', real(mpas_apvm_upwinding,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_h_ScaleWithMesh', mpas_h_ScaleWithMesh) + ! 
+ call mpas_pool_add_config(configPool, 'config_zd', real(mpas_zd,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_xnutr', real(mpas_xnutr,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_mpas_cam_coef', real(mpas_cam_coef,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_number_cam_damping_levels', mpas_cam_damping_levels) + call mpas_pool_add_config(configPool, 'config_rayleigh_damp_u', mpas_rayleigh_damp_u) + call mpas_pool_add_config(configPool, 'config_rayleigh_damp_u_timescale_days', real(mpas_rayleigh_damp_u_timescale_days,kind=RKIND)) + call mpas_pool_add_config(configPool, 'config_number_rayleigh_damp_u_levels', mpas_number_rayleigh_damp_u_levels) + ! + call mpas_pool_add_config(configPool, 'config_apply_lbcs', mpas_apply_lbcs) + ! + call mpas_pool_add_config(configPool, 'config_pio_num_iotasks', mpas_pio_num_iotasks) + call mpas_pool_add_config(configPool, 'config_pio_stride', mpas_pio_stride) + ! + call mpas_pool_add_config(configPool, 'config_jedi_da', mpas_jedi_da) + ! + call mpas_pool_add_config(configPool, 'config_block_decomp_file_prefix', mpas_block_decomp_file_prefix) + ! + call mpas_pool_add_config(configPool, 'config_do_restart', mpas_do_restart) + ! + call mpas_pool_add_config(configPool, 'config_print_global_minmax_vel', mpas_print_global_minmax_vel) + call mpas_pool_add_config(configPool, 'config_print_detailed_minmax_vel', mpas_print_detailed_minmax_vel) + call mpas_pool_add_config(configPool, 'config_print_global_minmax_sca', mpas_print_global_minmax_sca) + + ! Set some configuration parameters that cannot be changed by UFSATM. 
*From CAM src/dynamics/mpas/dyn_comp.F90* + call mpas_pool_add_config(configPool, 'config_num_halos', config_num_halos) + call mpas_pool_add_config(configPool, 'config_number_of_blocks', config_number_of_blocks) + call mpas_pool_add_config(configPool, 'config_explicit_proc_decomp', config_explicit_proc_decomp) + call mpas_pool_add_config(configPool, 'config_proc_decomp_file_prefix', config_proc_decomp_file_prefix) + call mpas_pool_add_config(configPool, 'config_relax_zone_divdamp_coef', config_relax_zone_divdamp_coef) + + ! Display namelist information (master processor only) + if (me == master) then + call mpas_log_write('-------------------------------- MPAS-A dycore namelist ---------------------------------') + call mpas_log_write('') + call mpas_log_write(' mpas_time_integration = '//trim(mpas_time_integration)) + call mpas_log_write(' mpas_time_integration_order = '//int2str(mpas_time_integration_order)) + call mpas_log_write(' mpas_dt = '//int2str(int(mpas_dt))) + call mpas_log_write(' mpas_split_dynamics_transport = '//log2str(mpas_split_dynamics_transport)) + call mpas_log_write(' mpas_number_of_sub_steps = '//int2str(mpas_number_of_sub_steps)) + call mpas_log_write(' mpas_dynamics_split_steps = '//int2str(mpas_dynamics_split_steps)) + call mpas_log_write(' mpas_h_mom_eddy_visc2 = '//int2str(int(mpas_h_mom_eddy_visc2))) + call mpas_log_write(' mpas_h_mom_eddy_visc4 = '//int2str(int(mpas_h_mom_eddy_visc4))) + call mpas_log_write(' mpas_v_mom_eddy_visc2 = '//int2str(int(mpas_v_mom_eddy_visc2))) + call mpas_log_write(' mpas_h_theta_eddy_visc2 = '//int2str(int(mpas_h_theta_eddy_visc2))) + call mpas_log_write(' mpas_h_theta_eddy_visc4 = '//int2str(int(mpas_h_theta_eddy_visc4))) + call mpas_log_write(' mpas_v_theta_eddy_visc2 = '//int2str(int(mpas_v_theta_eddy_visc2))) + call mpas_log_write(' mpas_horiz_mixing = '//trim(mpas_horiz_mixing)) + call mpas_log_write(' mpas_len_disp = '//int2str(int(mpas_len_disp))) + call mpas_log_write(' mpas_visc4_2dsmag = 
'//int2str(int(mpas_visc4_2dsmag))) + call mpas_log_write(' mpas_del4u_div_factor = '//int2str(int(mpas_del4u_div_factor))) + call mpas_log_write(' mpas_w_adv_order = '//int2str(mpas_w_adv_order)) + call mpas_log_write(' mpas_theta_adv_order = '//int2str(mpas_theta_adv_order)) + call mpas_log_write(' mpas_scalar_adv_order = '//int2str(mpas_scalar_adv_order)) + call mpas_log_write(' mpas_u_vadv_order = '//int2str(mpas_u_vadv_order)) + call mpas_log_write(' mpas_w_vadv_order = '//int2str(mpas_w_vadv_order)) + call mpas_log_write(' mpas_theta_vadv_order = '//int2str(mpas_theta_vadv_order)) + call mpas_log_write(' mpas_scalar_vadv_order = '//int2str(mpas_scalar_vadv_order)) + call mpas_log_write(' mpas_scalar_advection = '//log2str(mpas_scalar_advection)) + call mpas_log_write(' mpas_positive_definite = '//log2str(mpas_positive_definite)) + call mpas_log_write(' mpas_monotonic = '//log2str(mpas_monotonic)) + call mpas_log_write(' mpas_coef_3rd_order = '//int2str(int(mpas_coef_3rd_order))) + call mpas_log_write(' mpas_smagorinsky_coef = '//int2str(int(mpas_smagorinsky_coef))) + call mpas_log_write(' mpas_mix_full = '//log2str(mpas_mix_full)) + call mpas_log_write(' mpas_epssm = '//int2str(int(mpas_epssm))) + call mpas_log_write(' mpas_smdiv = '//int2str(int(mpas_smdiv))) + call mpas_log_write(' mpas_apvm_upwinding = '//int2str(int(mpas_apvm_upwinding))) + call mpas_log_write(' mpas_h_ScaleWithMesh = '//log2str(mpas_h_ScaleWithMesh)) + call mpas_log_write(' mpas_zd = '//int2str(int(mpas_zd))) + call mpas_log_write(' mpas_xnutr = '//int2str(int(mpas_xnutr))) + call mpas_log_write(' mpas_cam_coef = '//int2str(int(mpas_cam_coef))) + call mpas_log_write(' mpas_cam_damping_levels = '//int2str(mpas_cam_damping_levels)) + call mpas_log_write(' mpas_rayleigh_damp_u = '//log2str(mpas_rayleigh_damp_u)) + call mpas_log_write(' mpas_rayleigh_damp_u_timescale_days = '//int2str(int(mpas_rayleigh_damp_u_timescale_days))) + call mpas_log_write(' mpas_number_rayleigh_damp_u_levels = 
'//int2str(mpas_number_rayleigh_damp_u_levels)) + call mpas_log_write(' mpas_apply_lbcs = '//log2str(mpas_apply_lbcs)) + call mpas_log_write(' mpas_pio_num_iotasks = '//int2str(mpas_pio_num_iotasks)) + call mpas_log_write(' mpas_pio_stride = '//int2str(mpas_pio_stride)) + call mpas_log_write(' mpas_jedi_da = '//log2str(mpas_jedi_da)) + call mpas_log_write(' mpas_block_decomp_file_prefix = '//trim(mpas_block_decomp_file_prefix)) + call mpas_log_write(' mpas_do_restart = '//log2str(mpas_do_restart)) + call mpas_log_write(' mpas_print_global_minmax_vel = '//log2str(mpas_print_global_minmax_vel)) + call mpas_log_write(' mpas_print_detailed_minmax_vel = '//log2str(mpas_print_detailed_minmax_vel)) + call mpas_log_write(' mpas_print_global_minmax_sca = '//log2str(mpas_print_global_minmax_sca)) + end if + end subroutine read_mpas_namelist + + !> ######################################################################################## + ! subroutine dyn_mpas_read_write_stream + ! + !> summary: Read or write an MPAS stream. + !> author: Kuan-Chih Wang + !> date: 2024-03-15 + !> + !> In the context of MPAS, the concept of a "pool" resembles a group of + !> (related) variables, while the concept of a "stream" resembles a file. + !> This subroutine reads or writes an MPAS stream. It provides the mechanism + !> for CAM-SIMA to input/output data to/from MPAS dynamical core. + !> Analogous to the `{read,write}_stream` subroutines in MPAS stream manager. + ! + !> ######################################################################################## + subroutine dyn_mpas_read_write_stream(stream_mode, stream_name) + ! Module(s) from external libraries. + use pio, only: file_desc_t + use mpp_mod, only : FATAL, mpp_error + ! Module(s) from MPAS. 
+    use mpas_derived_types, only : mpas_pool_type, mpas_stream_noerr, mpas_stream_type
+    use mpas_io_streams, only : mpas_closestream, mpas_readstream, mpas_writestream
+    use mpas_pool_routines, only : mpas_pool_destroy_pool
+    use mpas_stream_manager, only : postread_reindex, prewrite_reindex, postwrite_reindex
+    use mpas_log, only : mpas_log_write
+    use mpas_atm_halos, only : exchange_halo_group
+
+    !> Stream mode: one of 'r'/'read' or 'w'/'write'.
+    character(*), intent(in) :: stream_mode
+    !> Name of the stream; parsed by `parse_stream_name` into the variable list.
+    character(*), intent(in) :: stream_name
+
+    character(*), parameter :: subname = 'dyn_mpas_subdriver::dyn_mpas_read_write_stream'
+    integer :: i, ierr
+    type(mpas_pool_type), pointer :: mpas_pool
+    type(mpas_stream_type), pointer :: mpas_stream
+    type(var_info_type), allocatable :: var_info_list(:)
+
+    call mpas_log_write('')
+
+    nullify(mpas_pool)
+    nullify(mpas_stream)
+
+    call mpas_log_write('Initializing stream "' // trim(adjustl(stream_name)) // '"')
+
+    ! Build the stream and its accompanying pool on top of the externally managed
+    ! PIO file descriptor (`pioid`).
+    call dyn_mpas_init_stream_with_pool(mpas_pool, mpas_stream, pioid, stream_mode, stream_name)
+
+    if (.not. associated(mpas_pool)) then
+        call mpp_error(FATAL,subname//'Failed to initialize stream "' // trim(adjustl(stream_name)) // '"')
+    end if
+
+    if (.not. associated(mpas_stream)) then
+        call mpp_error(FATAL,subname//'Failed to initialize stream "' // trim(adjustl(stream_name)) // '"')
+    end if
+
+    select case (trim(adjustl(stream_mode)))
+    case ('r', 'read')
+        call mpas_log_write('Reading stream "' // trim(adjustl(stream_name)) // '"')
+
+        call mpas_readstream(mpas_stream, 1, ierr=ierr)
+
+        if (ierr /= mpas_stream_noerr) then
+            call mpp_error(FATAL,subname//'Failed to read stream "' // trim(adjustl(stream_name)) // '"')
+        end if
+
+        ! Exchange halo layers because new data have just been read.
+        var_info_list = parse_stream_name(stream_name)
+
+        do i = 1, size(var_info_list)
+            ! NOTE(review): `dyn_mpas_exchange_halo` reports failures itself via
+            ! mpp_error(FATAL, ...) and does not return a status. The previous
+            ! `if (ierr /= 0)` check at this point re-tested the stale value left
+            ! over from mpas_readstream above (dead or spuriously fatal depending
+            ! on the value of mpas_stream_noerr), so it has been removed.
+            call dyn_mpas_exchange_halo(var_info_list(i) % name)
+        end do
+
+        ! For any connectivity arrays in this stream, convert global indexes to local indexes.
+        call postread_reindex(domain_ptr % blocklist % allfields, domain_ptr % packages, &
+                              mpas_pool, mpas_pool)
+    case ('w', 'write')
+        call mpas_log_write('Writing stream "' // trim(adjustl(stream_name)) // '"')
+
+        ! WARNING:
+        ! The `{pre,post}write_reindex` subroutines are STATEFUL because they store information inside their module
+        ! (i.e., module variables). They MUST be called in pairs, like below, to prevent undefined behaviors.
+
+        ! For any connectivity arrays in this stream, temporarily convert local indexes to global indexes.
+        call prewrite_reindex(domain_ptr % blocklist % allfields, domain_ptr % packages, &
+                              mpas_pool, mpas_pool)
+
+        call mpas_writestream(mpas_stream, 1, ierr=ierr)
+
+        if (ierr /= mpas_stream_noerr) then
+            call mpp_error(FATAL,subname//'Failed to write stream "' // trim(adjustl(stream_name)) // '"')
+        end if
+
+        ! For any connectivity arrays in this stream, reset global indexes back to local indexes.
+        call postwrite_reindex(domain_ptr % blocklist % allfields, mpas_pool)
+    case default
+        call mpp_error(FATAL,subname//'Unsupported stream mode "' // trim(adjustl(stream_mode)) // '"')
+    end select
+
+    call mpas_log_write('Closing stream "' // trim(adjustl(stream_name)) // '"')
+
+    call mpas_closestream(mpas_stream, ierr=ierr)
+
+    if (ierr /= mpas_stream_noerr) then
+        call mpp_error(FATAL,subname//'Failed to close stream "' // trim(adjustl(stream_name)) // '"')
+    end if
+
+    ! Deallocate temporary pointers to avoid memory leaks.
+ call mpas_pool_destroy_pool(mpas_pool) + nullify(mpas_pool) + + deallocate(mpas_stream) + nullify(mpas_stream) + + call mpas_log_write(subname // ' completed') + end subroutine dyn_mpas_read_write_stream + + !> ######################################################################################## + ! subroutine dyn_mpas_exchange_halo + ! + !> summary: Update the halo layers of the named field. + !> author: Michael Duda + !> date: 16 January 2020 + !> + !> Given a field name that is defined in MPAS registry, this subroutine updates + !> the halo layers for that field. + !> Ported and refactored for CAM-SIMA. (KCW, 2024-03-18) + !> Ported and refactored for UWM (DJS: 2025) + ! + !> ######################################################################################## + subroutine dyn_mpas_exchange_halo(field_name) + ! Module(s) from MPAS. + use mpas_derived_types, only : field1dinteger, field2dinteger, field3dinteger, & + field1dreal, field2dreal, field3dreal, field4dreal, & + field5dreal, mpas_pool_field_info_type, mpas_pool_integer,& + mpas_pool_real + use mpas_dmpar, only : mpas_dmpar_exch_halo_field + use mpas_pool_routines, only : mpas_pool_get_field, mpas_pool_get_field_info + use mpp_mod, only : FATAL, mpp_error + use mpas_log, only : mpas_log_write + character(*), intent(in) :: field_name + + character(*), parameter :: subname = 'dyn_mpas_subdriver::dyn_mpas_exchange_halo' + type(field1dinteger), pointer :: field_1d_integer + type(field2dinteger), pointer :: field_2d_integer + type(field3dinteger), pointer :: field_3d_integer + type(field1dreal), pointer :: field_1d_real + type(field2dreal), pointer :: field_2d_real + type(field3dreal), pointer :: field_3d_real + type(field4dreal), pointer :: field_4d_real + type(field5dreal), pointer :: field_5d_real + type(mpas_pool_field_info_type) :: mpas_pool_field_info + + call mpas_log_write(subname // ' entered') + + nullify(field_1d_integer) + nullify(field_2d_integer) + nullify(field_3d_integer) + 
nullify(field_1d_real) + nullify(field_2d_real) + nullify(field_3d_real) + nullify(field_4d_real) + nullify(field_5d_real) + + call mpas_log_write('Inquiring field information for "' // trim(adjustl(field_name)) // '"') + + call mpas_pool_get_field_info(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), mpas_pool_field_info) + + if (mpas_pool_field_info % fieldtype == -1 .or. & + mpas_pool_field_info % ndims == -1 .or. & + mpas_pool_field_info % nhalolayers == -1) then + call mpp_error(FATAL,subname//'Invalid field information for "' // trim(adjustl(field_name)) // '"') + end if + + ! No halo layers to exchange. This field is not decomposed. + if (mpas_pool_field_info % nhalolayers == 0) then + call mpas_log_write('Skipping field "' // trim(adjustl(field_name)) // '" due to not decomposed') + + return + end if + + call mpas_log_write('Exchanging halo layers for "' // trim(adjustl(field_name)) // '"') + + select case (mpas_pool_field_info % fieldtype) + case (mpas_pool_integer) + select case (mpas_pool_field_info % ndims) + case (1) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), field_1d_integer, timelevel=1) + + if (.not. associated(field_1d_integer)) then + call mpp_error(FATAL,subname//'Failed to find field "' // trim(adjustl(field_name)) // '"') + end if + + call mpas_dmpar_exch_halo_field(field_1d_integer) + + nullify(field_1d_integer) + case (2) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), field_2d_integer, timelevel=1) + + if (.not. associated(field_2d_integer)) then + call mpp_error(FATAL,subname//'Failed to find field "' // trim(adjustl(field_name)) // '"') + end if + + call mpas_dmpar_exch_halo_field(field_2d_integer) + + nullify(field_2d_integer) + case (3) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), field_3d_integer, timelevel=1) + + if (.not. 
associated(field_3d_integer)) then + call mpp_error(FATAL,subname//'Failed to find field "' // trim(adjustl(field_name)) // '"') + end if + + call mpas_dmpar_exch_halo_field(field_3d_integer) + + nullify(field_3d_integer) + case default + call mpp_error(FATAL,subname//'Unsupported field rank ' // stringify([mpas_pool_field_info % ndims])) + end select + case (mpas_pool_real) + select case (mpas_pool_field_info % ndims) + case (1) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), field_1d_real, timelevel=1) + + if (.not. associated(field_1d_real)) then + call mpp_error(FATAL,subname//'Failed to find field "' // trim(adjustl(field_name)) // '"') + end if + + call mpas_dmpar_exch_halo_field(field_1d_real) + + nullify(field_1d_real) + case (2) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), field_2d_real, timelevel=1) + + if (.not. associated(field_2d_real)) then + call mpp_error(FATAL,subname//'Failed to find field "' // trim(adjustl(field_name)) // '"') + end if + + call mpas_dmpar_exch_halo_field(field_2d_real) + + nullify(field_2d_real) + case (3) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), field_3d_real, timelevel=1) + + if (.not. associated(field_3d_real)) then + call mpp_error(FATAL,subname//'Failed to find field "' // trim(adjustl(field_name)) // '"') + end if + + call mpas_dmpar_exch_halo_field(field_3d_real) + + nullify(field_3d_real) + case (4) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), field_4d_real, timelevel=1) + + if (.not. 
associated(field_4d_real)) then + call mpp_error(FATAL,subname//'Failed to find field "' // trim(adjustl(field_name)) // '"') + end if + + call mpas_dmpar_exch_halo_field(field_4d_real) + + nullify(field_4d_real) + case (5) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(field_name)), field_5d_real, timelevel=1) + + if (.not. associated(field_5d_real)) then + call mpp_error(FATAL,subname//'Failed to find field "' // trim(adjustl(field_name)) // '"') + end if + + call mpas_dmpar_exch_halo_field(field_5d_real) + + nullify(field_5d_real) + case default + call mpp_error(FATAL,subname//'Unsupported field rank ' // stringify([mpas_pool_field_info % ndims])) + end select + case default + call mpp_error(FATAL,subname//'Unsupported field type (Must be one of: integer, real)') + end select + + call mpas_log_write(subname // ' completed') + end subroutine dyn_mpas_exchange_halo + + !> ######################################################################################## + ! subroutine dyn_mpas_init_stream_with_pool + ! + !> summary: Initialize an MPAS stream with an accompanying MPAS pool. + !> author: Kuan-Chih Wang + !> date: 2024-03-14 + !> + !> In the context of MPAS, the concept of a "pool" resembles a group of + !> (related) variables, while the concept of a "stream" resembles a file. + !> This subroutine initializes an MPAS stream with an accompanying MPAS pool by + !> adding variable and attribute information to them. After that, MPAS is ready + !> to perform IO on them. + !> Analogous to the `build_stream` and `mpas_stream_mgr_add_field` + !> subroutines in MPAS stream manager. + !> + !> Ported and refactored for UWM (DJS: 2025) + ! + !> ######################################################################################## + subroutine dyn_mpas_init_stream_with_pool(mpas_pool, mpas_stream, pio_file, stream_mode, & + stream_name) + ! Module(s) from external libraries. + use pio, only: file_desc_t, pio_file_is_open + ! 
Module(s) from MPAS. + use mpas_derived_types, only : field0dchar, field1dchar, field0dinteger, field1dinteger,& + field2dinteger, field3dinteger, field0dreal, field1dreal,& + field2dreal, field3dreal, field4dreal, field5dreal, & + mpas_io_native_precision, mpas_io_pnetcdf, mpas_io_read, & + mpas_io_write, mpas_pool_type, mpas_stream_noerr, & + mpas_stream_type + use mpas_io_streams, only : mpas_createstream, mpas_streamaddfield + use mpas_pool_routines, only : mpas_pool_add_config, mpas_pool_create_pool, mpas_pool_get_field + use mpas_kind_types, only : StrKIND, RKIND + use mpp_mod, only : FATAL, mpp_error + use mpas_log, only : mpas_log_write + + type(mpas_pool_type), pointer, intent(out) :: mpas_pool + type(mpas_stream_type), pointer, intent(out) :: mpas_stream + type(file_desc_t), pointer, intent(in) :: pio_file + character(*), intent(in) :: stream_mode + character(*), intent(in) :: stream_name + + interface add_stream_attribute + procedure :: add_stream_attribute_0d + procedure :: add_stream_attribute_1d + end interface add_stream_attribute + + character(*), parameter :: subname = 'dyn_mpas_subdriver::dyn_mpas_init_stream_with_pool' + character(strkind) :: stream_filename + integer :: i, ierr, stream_format + !> Whether a variable is present on the file (i.e., `pio_file`). + logical, allocatable :: var_is_present(:) + !> Whether a variable is type, kind, and rank compatible with what MPAS expects on the file (i.e., `pio_file`). 
+ logical, allocatable :: var_is_tkr_compatible(:) + type(field0dchar), pointer :: field_0d_char + type(field1dchar), pointer :: field_1d_char + type(field0dinteger), pointer :: field_0d_integer + type(field1dinteger), pointer :: field_1d_integer + type(field2dinteger), pointer :: field_2d_integer + type(field3dinteger), pointer :: field_3d_integer + type(field0dreal), pointer :: field_0d_real + type(field1dreal), pointer :: field_1d_real + type(field2dreal), pointer :: field_2d_real + type(field3dreal), pointer :: field_3d_real + type(field4dreal), pointer :: field_4d_real + type(field5dreal), pointer :: field_5d_real + type(var_info_type), allocatable :: var_info_list(:) + + call mpas_log_write(subname // ' entered') + + nullify(field_0d_char) + nullify(field_1d_char) + nullify(field_0d_integer) + nullify(field_1d_integer) + nullify(field_2d_integer) + nullify(field_3d_integer) + nullify(field_0d_real) + nullify(field_1d_real) + nullify(field_2d_real) + nullify(field_3d_real) + nullify(field_4d_real) + nullify(field_5d_real) + + call mpas_pool_create_pool(mpas_pool) + + allocate(mpas_stream, stat=ierr) + + if (ierr /= 0) then + call mpp_error(FATAL,subname//'Failed to allocate stream "' // trim(adjustl(stream_name)) // '"') + end if + + ! Not actually used because a PIO file descriptor is directly supplied. + stream_filename = 'external stream' + stream_format = mpas_io_pnetcdf + + call mpas_log_write('Checking PIO file descriptor') + + if (.not. associated(pio_file)) then + call mpp_error(FATAL,subname//'Invalid PIO file descriptor') + end if + + if (.not. 
pio_file_is_open(pio_file)) then + call mpp_error(FATAL,subname//'Invalid PIO file descriptor') + end if + + select case (trim(adjustl(stream_mode))) + case ('r', 'read') + call mpas_log_write('Creating stream "' // trim(adjustl(stream_name)) // '" for reading') + + call mpas_createstream( & + mpas_stream, domain_ptr % iocontext, stream_filename, stream_format, mpas_io_read, & + clobberrecords=.false., clobberfiles=.false., truncatefiles=.false., & + precision=mpas_io_native_precision, pio_file_desc=pio_file, ierr=ierr) + case ('w', 'write') + call mpas_log_write('Creating stream "' // trim(adjustl(stream_name)) // '" for writing') + + call mpas_createstream( & + mpas_stream, domain_ptr % iocontext, stream_filename, stream_format, mpas_io_write, & + clobberrecords=.false., clobberfiles=.false., truncatefiles=.false., & + precision=mpas_io_native_precision, pio_file_desc=pio_file, ierr=ierr) + case default + call mpp_error(FATAL,subname//'Unsupported stream mode "' // trim(adjustl(stream_mode)) // '"') + end select + + if (ierr /= mpas_stream_noerr) then + call mpp_error(FATAL,subname//'Failed to create stream "' // trim(adjustl(stream_name)) // '"') + end if + + var_info_list = parse_stream_name(stream_name) + + ! Add variables contained in `var_info_list` to stream. + do i = 1, size(var_info_list) + call mpas_log_write('var_info_list(' // stringify([i]) // ') % name = ' // stringify([var_info_list(i) % name])) + call mpas_log_write('var_info_list(' // stringify([i]) // ') % type = ' // stringify([var_info_list(i) % type])) + call mpas_log_write('var_info_list(' // stringify([i]) // ') % rank = ' // stringify([var_info_list(i) % rank])) + + if (trim(adjustl(stream_mode)) == 'r' .or. trim(adjustl(stream_mode)) == 'read') then + call dyn_mpas_check_variable_status(var_is_present, var_is_tkr_compatible, pio_file, var_info_list(i)) + + ! Do not hard crash the model if a variable is missing and cannot be read. + ! 
This can happen if users attempt to initialize/restart the model with data generated by + ! older versions of MPAS. Print a debug message to let users decide if this is acceptable. + if (.not. any(var_is_present)) then + call mpas_log_write('Skipping variable "' // trim(adjustl(var_info_list(i) % name)) // '" due to not present') + + cycle + end if + + if (any(var_is_present .and. .not. var_is_tkr_compatible)) then + call mpas_log_write('Skipping variable "' // trim(adjustl(var_info_list(i) % name)) // '" due to not TKR compatible') + + !cycle + end if + end if + + ! Add "" to pool with the value of `1`. + ! The existence of "" in pool causes it to be considered for IO in MPAS. + call mpas_pool_add_config(mpas_pool, trim(adjustl(var_info_list(i) % name)), 1) + ! Add ":packages" to pool with the value of an empty character string. + ! This causes "" to be always considered active for IO in MPAS. + !call mpas_pool_add_config(mpas_pool, trim(adjustl(var_info_list(i) % name) // ':packages'), '') + + ! Add "" to stream. + call mpas_log_write('Adding variable "' // trim(adjustl(var_info_list(i) % name)) // & + '" to stream "' // trim(adjustl(stream_name)) // '"') + + select case (trim(adjustl(var_info_list(i) % type))) + case ('character') + select case (var_info_list(i) % rank) + case (0) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_0d_char, timelevel=1) + + if (.not. associated(field_0d_char)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_0d_char, ierr=ierr) + + nullify(field_0d_char) + case (1) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_1d_char, timelevel=1) + + if (.not. 
associated(field_1d_char)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_1d_char, ierr=ierr) + + nullify(field_1d_char) + case default + call mpp_error(FATAL,subname//'Unsupported variable rank ' // stringify([var_info_list(i) % rank]) // & + ' for "' // trim(adjustl(var_info_list(i) % name)) // '"') + end select + case ('integer') + select case (var_info_list(i) % rank) + case (0) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_0d_integer, timelevel=1) + + if (.not. associated(field_0d_integer)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_0d_integer, ierr=ierr) + + nullify(field_0d_integer) + case (1) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_1d_integer, timelevel=1) + + if (.not. associated(field_1d_integer)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_1d_integer, ierr=ierr) + + nullify(field_1d_integer) + case (2) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_2d_integer, timelevel=1) + + if (.not. associated(field_2d_integer)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_2d_integer, ierr=ierr) + + nullify(field_2d_integer) + case (3) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_3d_integer, timelevel=1) + + if (.not. 
associated(field_3d_integer)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_3d_integer, ierr=ierr) + + nullify(field_3d_integer) + case default + call mpp_error(FATAL,subname//'Unsupported variable rank ' // stringify([var_info_list(i) % rank]) // & + ' for "' // trim(adjustl(var_info_list(i) % name)) // '"') + end select + case ('real') + select case (var_info_list(i) % rank) + case (0) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_0d_real, timelevel=1) + + if (.not. associated(field_0d_real)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_0d_real, ierr=ierr) + + nullify(field_0d_real) + case (1) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_1d_real, timelevel=1) + + if (.not. associated(field_1d_real)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_1d_real, ierr=ierr) + + nullify(field_1d_real) + case (2) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_2d_real, timelevel=1) + + if (.not. associated(field_2d_real)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_2d_real, ierr=ierr) + + nullify(field_2d_real) + case (3) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_3d_real, timelevel=1) + + if (.not. 
associated(field_3d_real)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_3d_real, ierr=ierr) + + nullify(field_3d_real) + case (4) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_4d_real, timelevel=1) + + if (.not. associated(field_4d_real)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_4d_real, ierr=ierr) + + nullify(field_4d_real) + case (5) + call mpas_pool_get_field(domain_ptr % blocklist % allfields, & + trim(adjustl(var_info_list(i) % name)), field_5d_real, timelevel=1) + + if (.not. associated(field_5d_real)) then + call mpp_error(FATAL,subname//'Failed to find variable "' // trim(adjustl(var_info_list(i) % name)) // '"') + end if + + call mpas_streamaddfield(mpas_stream, field_5d_real, ierr=ierr) + + nullify(field_5d_real) + case default + call mpp_error(FATAL,subname//'Unsupported variable rank ' // stringify([var_info_list(i) % rank]) // & + ' for "' // trim(adjustl(var_info_list(i) % name)) // '"') + end select + case default + call mpp_error(FATAL,subname//'Unsupported variable type "' // trim(adjustl(var_info_list(i) % type)) // & + '" for "' // trim(adjustl(var_info_list(i) % name)) // '"') + end select + + if (ierr /= mpas_stream_noerr) then + call mpp_error(FATAL,subname//'Failed to add variable "' // trim(adjustl(var_info_list(i) % name)) // & + '" to stream "' // trim(adjustl(stream_name)) // '"') + end if + end do + + if (trim(adjustl(stream_mode)) == 'w' .or. trim(adjustl(stream_mode)) == 'write') then + ! Add MPAS-specific attributes to stream. + + ! Attributes related to MPAS core (i.e., `core_type`). 
+ call add_stream_attribute('conventions', domain_ptr % core % conventions) + call add_stream_attribute('core_name', domain_ptr % core % corename) + call add_stream_attribute('git_version', domain_ptr % core % git_version) + call add_stream_attribute('model_name', domain_ptr % core % modelname) + call add_stream_attribute('source', domain_ptr % core % source) + + ! Attributes related to MPAS domain (i.e., `domain_type`). + call add_stream_attribute('is_periodic', domain_ptr % is_periodic) + call add_stream_attribute('mesh_spec', domain_ptr % mesh_spec) + call add_stream_attribute('on_a_sphere', domain_ptr % on_a_sphere) + call add_stream_attribute('parent_id', domain_ptr % parent_id) + call add_stream_attribute('sphere_radius', domain_ptr % sphere_radius) + call add_stream_attribute('x_period', domain_ptr % x_period) + call add_stream_attribute('y_period', domain_ptr % y_period) + end if + + call mpas_log_write(subname // ' completed') + contains + !> Helper subroutine for adding a 0-d stream attribute by calling `mpas_writestreamatt` with error checking. + !> (KCW, 2024-03-14) + subroutine add_stream_attribute_0d(attribute_name, attribute_value) + ! Module(s) from MPAS. + use mpas_io_streams, only : mpas_writestreamatt + use mpas_log, only : mpas_log_write + character(*), intent(in) :: attribute_name + class(*), intent(in) :: attribute_value + + call mpas_log_write('Adding attribute "' // trim(adjustl(attribute_name)) // & + '" to stream "' // trim(adjustl(stream_name)) // '"') + + select type (attribute_value) + type is (character(*)) + call mpas_writestreamatt(mpas_stream, & + trim(adjustl(attribute_name)), trim(adjustl(attribute_value)), syncval=.false., ierr=ierr) + type is (integer) + call mpas_writestreamatt(mpas_stream, & + trim(adjustl(attribute_name)), attribute_value, syncval=.false., ierr=ierr) + type is (logical) + if (attribute_value) then + ! Logical `.true.` becomes character string "YES". 
+ call mpas_writestreamatt(mpas_stream, & + trim(adjustl(attribute_name)), 'YES', syncval=.false., ierr=ierr) + else + ! Logical `.false.` becomes character string "NO". + call mpas_writestreamatt(mpas_stream, & + trim(adjustl(attribute_name)), 'NO', syncval=.false., ierr=ierr) + end if + type is (real(rkind)) + call mpas_writestreamatt(mpas_stream, & + trim(adjustl(attribute_name)), attribute_value, syncval=.false., ierr=ierr) + class default + call mpp_error(FATAL,subname//'Unsupported attribute type (Must be one of: character, integer, logical, real)') + end select + + if (ierr /= mpas_stream_noerr) then + call mpp_error(FATAL,subname//'Failed to add attribute "' // trim(adjustl(attribute_name)) // & + '" to stream "' // trim(adjustl(stream_name)) // '"') + end if + end subroutine add_stream_attribute_0d + + !> Helper subroutine for adding a 1-d stream attribute by calling `mpas_writestreamatt` with error checking. + !> (KCW, 2024-03-14) + subroutine add_stream_attribute_1d(attribute_name, attribute_value) + ! Module(s) from MPAS. 
+ use mpas_io_streams, only : mpas_writestreamatt + use mpas_log, only : mpas_log_write + character(*), intent(in) :: attribute_name + class(*), intent(in) :: attribute_value(:) + + call mpas_log_write('Adding attribute "' // trim(adjustl(attribute_name)) // & + '" to stream "' // trim(adjustl(stream_name)) // '"') + + select type (attribute_value) + type is (integer) + call mpas_writestreamatt(mpas_stream, & + trim(adjustl(attribute_name)), attribute_value, syncval=.false., ierr=ierr) + type is (real(rkind)) + call mpas_writestreamatt(mpas_stream, & + trim(adjustl(attribute_name)), attribute_value, syncval=.false., ierr=ierr) + class default + call mpp_error(FATAL,subname//'Unsupported attribute type (Must be one of: integer, real)') + end select + + if (ierr /= mpas_stream_noerr) then + call mpp_error(FATAL,subname//'Failed to add attribute "' // trim(adjustl(attribute_name)) // & + '" to stream "' // trim(adjustl(stream_name)) // '"') + end if + end subroutine add_stream_attribute_1d + end subroutine dyn_mpas_init_stream_with_pool + + !> Parse a stream name, which consists of one or more stream name fragments, and return the corresponding variable information + !> as a list of `var_info_type`. Multiple stream name fragments should be separated by "+" (i.e., a plus, meaning "addition" + !> operation) or "-" (i.e., a minus, meaning "subtraction" operation). + !> A stream name fragment can be a predefined stream name (e.g., "invariant", "input", etc.) or a single variable name. + !> For example, a stream name of "invariant+input+restart" means the union of variables in the "invariant", "input", and + !> "restart" streams. + !> Duplicate variable information in the resulting list is discarded. 
!> (KCW, 2024-06-01)
pure function parse_stream_name(stream_name) result(var_info_list)
   character(*), intent(in) :: stream_name
   type(var_info_type), allocatable :: var_info_list(:)

   character(*), parameter :: supported_stream_name_operator = '+-'
   character(1) :: stream_name_operator
   character(:), allocatable :: stream_name_fragment
   character(len(invariant_var_info_list % name)), allocatable :: var_name_list(:)
   integer :: i, j, n, offset
   type(var_info_type), allocatable :: fragment_var_info(:)

   n = len_trim(stream_name)

   ! An empty stream name maps to an empty list.
   if (n == 0) then
      var_info_list = parse_stream_name_fragment('')

      return
   end if

   i = scan(stream_name, supported_stream_name_operator)

   ! Without any operator, the whole stream name is a single fragment.
   if (i == 0) then
      stream_name_fragment = stream_name
      var_info_list = parse_stream_name_fragment(stream_name_fragment)

      return
   end if

   offset = 0
   var_info_list = parse_stream_name_fragment('')

   do
      ! Operator preceding the current fragment; "+" is implied for the first fragment.
      if (offset > 0) then
         stream_name_operator = stream_name(offset:offset)
      else
         stream_name_operator = '+'
      end if

      ! Current fragment; it may be empty (e.g., consecutive operators).
      if (i > 1) then
         stream_name_fragment = stream_name(offset + 1:offset + i - 1)
      else
         stream_name_fragment = ''
      end if

      ! Apply the fragment to the accumulated list according to the operator.
      if (len_trim(stream_name_fragment) > 0) then
         fragment_var_info = parse_stream_name_fragment(stream_name_fragment)

         select case (stream_name_operator)
         case ('+')
            var_info_list = [var_info_list, fragment_var_info]
         case ('-')
            do j = 1, size(fragment_var_info)
               var_name_list = var_info_list % name
               var_info_list = pack(var_info_list, var_name_list /= fragment_var_info(j) % name)
            end do
         case default
            ! Unreachable: `scan` only matches supported operators. Do nothing.
         end select
      end if

      offset = offset + i

      ! Everything in the stream name has been consumed.
      if (offset + 1 > n) then
         exit
      end if

      i = scan(stream_name(offset + 1:), supported_stream_name_operator)

      ! No more operators; take one final iteration for the trailing fragment.
      if (i == 0) then
         i = n - offset + 1
      end if
   end do

   ! Keep only the first occurrence of each variable name.
   var_name_list = var_info_list % name
   var_info_list = var_info_list(index_unique(var_name_list))
end function parse_stream_name

!> Parse a stream name fragment and return the corresponding variable information as a list of `var_info_type`.
!> A stream name fragment can be a predefined stream name (e.g., "invariant", "input", etc.) or a single variable name.
!> (KCW, 2024-06-01)
pure function parse_stream_name_fragment(stream_name_fragment) result(var_info_list)
   character(*), intent(in) :: stream_name_fragment
   type(var_info_type), allocatable :: var_info_list(:)

   character(len(invariant_var_info_list % name)), allocatable :: var_name_list(:)
   type(var_info_type), allocatable :: matched_var_info(:)

   select case (trim(adjustl(stream_name_fragment)))
   case ('')
      ! Empty fragment: empty list.
      allocate(var_info_list(0))
   case ('invariant')
      allocate(var_info_list, source=invariant_var_info_list)
   case ('input')
      allocate(var_info_list, source=input_var_info_list)
   case ('restart')
      allocate(var_info_list, source=restart_var_info_list)
   case ('output')
      allocate(var_info_list, source=output_var_info_list)
   case default
      ! Not a predefined stream name; treat the fragment as a single variable name and
      ! gather every match from each predefined stream, in a fixed search order.
      allocate(var_info_list(0))

      var_name_list = invariant_var_info_list % name

      if (any(var_name_list == trim(adjustl(stream_name_fragment)))) then
         matched_var_info = pack(invariant_var_info_list, var_name_list == trim(adjustl(stream_name_fragment)))
         var_info_list = [var_info_list, matched_var_info]
      end if

      var_name_list = input_var_info_list % name

      if (any(var_name_list == trim(adjustl(stream_name_fragment)))) then
         matched_var_info = pack(input_var_info_list, var_name_list == trim(adjustl(stream_name_fragment)))
         var_info_list = [var_info_list, matched_var_info]
      end if

      var_name_list = restart_var_info_list % name

      if (any(var_name_list == trim(adjustl(stream_name_fragment)))) then
         matched_var_info = pack(restart_var_info_list, var_name_list == trim(adjustl(stream_name_fragment)))
         var_info_list = [var_info_list, matched_var_info]
      end if

      var_name_list = output_var_info_list % name

      if (any(var_name_list == trim(adjustl(stream_name_fragment)))) then
         matched_var_info = pack(output_var_info_list, var_name_list == trim(adjustl(stream_name_fragment)))
         var_info_list = [var_info_list, matched_var_info]
      end if
   end select
end function parse_stream_name_fragment

!> Return the index of unique elements in `array`, which can be any intrinsic data types, as an integer array.
!> If `array` contains zero element or is of unsupported data types, an empty integer array is produced.
!> For example, `index_unique([1, 2, 3, 1, 2, 3, 4, 5])` returns `[1, 2, 3, 7, 8]`.
!> (KCW, 2024-03-22)
pure function index_unique(array)
   use, intrinsic :: iso_fortran_env, only: int32, int64, real32, real64

   class(*), intent(in) :: array(:)
   integer, allocatable :: index_unique(:)

   character(:), allocatable :: char_copy(:)
   integer :: i, n
   logical :: unique_mask(size(array))

   n = size(array)

   if (n == 0) then
      allocate(index_unique(0))

      return
   end if

   ! `unique_mask(i)` becomes true only for the first occurrence of each value.
   unique_mask = .false.

   select type (array)
   type is (character(*))
      ! Workaround for a bug in GNU Fortran >= 12. This is perhaps the manifestation of GCC Bugzilla Bug 100819.
      ! When a character string array is passed as the actual argument to an unlimited polymorphic dummy argument,
      ! its array index and length parameter are mishandled. Copy into a plain character array first.
      allocate(character(len(array)) :: char_copy(size(array)))

      char_copy(:) = array(:)

      do i = 1, n
         if (.not. any(char_copy(i) == char_copy .and. unique_mask)) then
            unique_mask(i) = .true.
         end if
      end do

      deallocate(char_copy)
   type is (integer(int32))
      do i = 1, n
         if (.not. any(array(i) == array .and. unique_mask)) then
            unique_mask(i) = .true.
         end if
      end do
   type is (integer(int64))
      do i = 1, n
         if (.not. any(array(i) == array .and. unique_mask)) then
            unique_mask(i) = .true.
         end if
      end do
   type is (logical)
      do i = 1, n
         if (.not. any((array(i) .eqv. array) .and. unique_mask)) then
            unique_mask(i) = .true.
         end if
      end do
   type is (real(real32))
      do i = 1, n
         if (.not. any(array(i) == array .and. unique_mask)) then
            unique_mask(i) = .true.
         end if
      end do
   type is (real(real64))
      do i = 1, n
         if (.not. any(array(i) == array .and. unique_mask)) then
            unique_mask(i) = .true.
         end if
      end do
   class default
      ! Unsupported element type: empty result.
      allocate(index_unique(0))

      return
   end select

   index_unique = pack([(i, i = 1, n)], unique_mask)
end function index_unique

!> ########################################################################################
! subroutine dyn_mpas_check_variable_status
!
!> summary: Check and return variable status on the given file.
!> author: Kuan-Chih Wang
!> date: 2024-06-04
!>
!> On the given file (i.e., `pio_file`), this subroutine checks whether the
!> given variable (i.e., `var_info`) is present, and whether it is "TKR"
!> compatible with what MPAS expects. "TKR" means type, kind, and rank.
!> This subroutine can handle both ordinary variables and variable arrays.
!> They are indicated by the `var` and `var_array` elements, respectively,
!> in MPAS registry. For an ordinary variable, the checks are performed on
!> itself. Otherwise, for a variable array, the checks are performed on its
!> constituent parts instead.
!
!> ########################################################################################
subroutine dyn_mpas_check_variable_status(var_is_present, var_is_tkr_compatible, pio_file, &
                                          var_info)
   ! Module(s) from external libraries.
   use pio, only: file_desc_t, pio_file_is_open, pio_char, pio_int, pio_real, pio_double, &
                  pio_inq_varid, pio_inq_varndims, pio_inq_vartype, pio_noerr
   ! Module(s) from MPAS.
   use mpas_derived_types, only : field0dchar, field1dchar, field0dinteger, field1dinteger, &
                                  field2dinteger, field3dinteger, field0dreal, field1dreal, &
                                  field2dreal, field3dreal, field4dreal, field5dreal
   use mpas_kind_types, only : r4kind, r8kind, StrKIND, RKIND
   use mpas_pool_routines, only : mpas_pool_get_field
   use mpas_log, only : mpas_log_write
   ! Module(s) from FMS.
   use mpp_mod, only : FATAL, mpp_error

   ! One entry per name checked: a single entry for an ordinary variable, or one entry per
   ! constituent part for a variable array.
   logical, allocatable, intent(out) :: var_is_present(:)
   logical, allocatable, intent(out) :: var_is_tkr_compatible(:)
   type(file_desc_t), pointer, intent(in) :: pio_file
   type(var_info_type), intent(in) :: var_info

   character(*), parameter :: subname = 'dyn_mpas_subdriver::dyn_mpas_check_variable_status'
   character(StrKIND), allocatable :: var_name_list(:)
   integer :: i, ierr, varid, varndims, vartype
   type(field0dchar), pointer :: field_0d_char
   type(field1dchar), pointer :: field_1d_char
   type(field0dinteger), pointer :: field_0d_integer
   type(field1dinteger), pointer :: field_1d_integer
   type(field2dinteger), pointer :: field_2d_integer
   type(field3dinteger), pointer :: field_3d_integer
   type(field0dreal), pointer :: field_0d_real
   type(field1dreal), pointer :: field_1d_real
   type(field2dreal), pointer :: field_2d_real
   type(field3dreal), pointer :: field_3d_real
   type(field4dreal), pointer :: field_4d_real
   type(field5dreal), pointer :: field_5d_real

   call mpas_log_write(subname // ' entered')

   nullify(field_0d_char)
   nullify(field_1d_char)
   nullify(field_0d_integer)
   nullify(field_1d_integer)
   nullify(field_2d_integer)
   nullify(field_3d_integer)
   nullify(field_0d_real)
   nullify(field_1d_real)
   nullify(field_2d_real)
   nullify(field_3d_real)
   nullify(field_4d_real)
   nullify(field_5d_real)

   ! Extract a list of variable names to check on the file.
   ! For an ordinary variable, this list just contains its name.
   ! For a variable array, this list contains the names of its constituent parts.
   select case (trim(adjustl(var_info % type)))
   case ('character')
      select case (var_info % rank)
      case (0)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_0d_char, timelevel=1)

         if (.not. associated(field_0d_char)) then
            ! Fixed: closing quote was missing from this message.
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_0d_char % isvararray .and. associated(field_0d_char % constituentnames)) then
            allocate(var_name_list(size(field_0d_char % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_0d_char % constituentnames(:)
         end if

         nullify(field_0d_char)
      case (1)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_1d_char, timelevel=1)

         if (.not. associated(field_1d_char)) then
            ! Fixed: closing quote was missing from this message.
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_1d_char % isvararray .and. associated(field_1d_char % constituentnames)) then
            allocate(var_name_list(size(field_1d_char % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_1d_char % constituentnames(:)
         end if

         nullify(field_1d_char)
      case default
         call mpp_error(FATAL, subname // 'Unsupported variable rank ' // stringify([var_info % rank]) // &
            ' for "' // trim(adjustl(var_info % name)) // '"')
      end select
   case ('integer')
      select case (var_info % rank)
      case (0)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_0d_integer, timelevel=1)

         if (.not. associated(field_0d_integer)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_0d_integer % isvararray .and. associated(field_0d_integer % constituentnames)) then
            allocate(var_name_list(size(field_0d_integer % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_0d_integer % constituentnames(:)
         end if

         nullify(field_0d_integer)
      case (1)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_1d_integer, timelevel=1)

         if (.not. associated(field_1d_integer)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_1d_integer % isvararray .and. associated(field_1d_integer % constituentnames)) then
            allocate(var_name_list(size(field_1d_integer % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_1d_integer % constituentnames(:)
         end if

         nullify(field_1d_integer)
      case (2)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_2d_integer, timelevel=1)

         if (.not. associated(field_2d_integer)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_2d_integer % isvararray .and. associated(field_2d_integer % constituentnames)) then
            allocate(var_name_list(size(field_2d_integer % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_2d_integer % constituentnames(:)
         end if

         nullify(field_2d_integer)
      case (3)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_3d_integer, timelevel=1)

         if (.not. associated(field_3d_integer)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_3d_integer % isvararray .and. associated(field_3d_integer % constituentnames)) then
            allocate(var_name_list(size(field_3d_integer % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_3d_integer % constituentnames(:)
         end if

         nullify(field_3d_integer)
      case default
         call mpp_error(FATAL, subname // 'Unsupported variable rank ' // stringify([var_info % rank]) // &
            ' for "' // trim(adjustl(var_info % name)) // '"')
      end select
   case ('real')
      select case (var_info % rank)
      case (0)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_0d_real, timelevel=1)

         if (.not. associated(field_0d_real)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_0d_real % isvararray .and. associated(field_0d_real % constituentnames)) then
            allocate(var_name_list(size(field_0d_real % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_0d_real % constituentnames(:)
         end if

         nullify(field_0d_real)
      case (1)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_1d_real, timelevel=1)

         if (.not. associated(field_1d_real)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_1d_real % isvararray .and. associated(field_1d_real % constituentnames)) then
            allocate(var_name_list(size(field_1d_real % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_1d_real % constituentnames(:)
         end if

         nullify(field_1d_real)
      case (2)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_2d_real, timelevel=1)

         if (.not. associated(field_2d_real)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_2d_real % isvararray .and. associated(field_2d_real % constituentnames)) then
            allocate(var_name_list(size(field_2d_real % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_2d_real % constituentnames(:)
         end if

         nullify(field_2d_real)
      case (3)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_3d_real, timelevel=1)

         if (.not. associated(field_3d_real)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_3d_real % isvararray .and. associated(field_3d_real % constituentnames)) then
            allocate(var_name_list(size(field_3d_real % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_3d_real % constituentnames(:)
         end if

         nullify(field_3d_real)
      case (4)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_4d_real, timelevel=1)

         if (.not. associated(field_4d_real)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_4d_real % isvararray .and. associated(field_4d_real % constituentnames)) then
            allocate(var_name_list(size(field_4d_real % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_4d_real % constituentnames(:)
         end if

         nullify(field_4d_real)
      case (5)
         call mpas_pool_get_field(domain_ptr % blocklist % allfields, &
            trim(adjustl(var_info % name)), field_5d_real, timelevel=1)

         if (.not. associated(field_5d_real)) then
            call mpp_error(FATAL, subname // 'Failed to find variable "' // trim(adjustl(var_info % name)) // '"')
         end if

         if (field_5d_real % isvararray .and. associated(field_5d_real % constituentnames)) then
            allocate(var_name_list(size(field_5d_real % constituentnames)), stat=ierr)

            if (ierr /= 0) then
               call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
            end if

            var_name_list(:) = field_5d_real % constituentnames(:)
         end if

         nullify(field_5d_real)
      case default
         call mpp_error(FATAL, subname // 'Unsupported variable rank ' // stringify([var_info % rank]) // &
            ' for "' // trim(adjustl(var_info % name)) // '"')
      end select
   case default
      call mpp_error(FATAL, subname // 'Unsupported variable type "' // trim(adjustl(var_info % type)) // &
         '" for "' // trim(adjustl(var_info % name)) // '"')
   end select

   ! Ordinary variable (no constituent parts found above): check the variable itself.
   if (.not. allocated(var_name_list)) then
      allocate(var_name_list(1), stat=ierr)

      if (ierr /= 0) then
         call mpp_error(FATAL, subname // 'Failed to allocate var_name_list')
      end if

      var_name_list(1) = var_info % name
   end if

   allocate(var_is_present(size(var_name_list)), stat=ierr)

   if (ierr /= 0) then
      call mpp_error(FATAL, subname // 'Failed to allocate var_is_present')
   end if

   var_is_present(:) = .false.

   allocate(var_is_tkr_compatible(size(var_name_list)), stat=ierr)

   if (ierr /= 0) then
      call mpp_error(FATAL, subname // 'Failed to allocate var_is_tkr_compatible')
   end if

   var_is_tkr_compatible(:) = .false.

   ! Without an open file, every variable is reported as absent and incompatible.
   if (.not. associated(pio_file)) then
      return
   end if

   if (.not. pio_file_is_open(pio_file)) then
      return
   end if

   call mpas_log_write('Checking variable "' // trim(adjustl(var_info % name)) // &
      '" for presence and TKR compatibility')

   do i = 1, size(var_name_list)
      ! Check if the variable is present on the file.
      ierr = pio_inq_varid(pio_file, trim(adjustl(var_name_list(i))), varid)

      if (ierr /= pio_noerr) then
         cycle
      end if

      var_is_present(i) = .true.

      ! Check if the variable is "TK"R compatible between MPAS and the file.
      ierr = pio_inq_vartype(pio_file, varid, vartype)

      if (ierr /= pio_noerr) then
         cycle
      end if

      select case (trim(adjustl(var_info % type)))
      case ('character')
         if (vartype /= pio_char) then
            cycle
         end if
      case ('integer')
         if (vartype /= pio_int) then
            cycle
         end if
      case ('real')
         ! When MPAS dynamical core is compiled at single precision, pairing it with double precision input data
         ! is not allowed to prevent loss of precision.
         if (rkind == r4kind .and. vartype /= pio_real) then
            cycle
         end if

         ! When MPAS dynamical core is compiled at double precision, pairing it with single and double precision
         ! input data is allowed.
         if (rkind == r8kind .and. vartype /= pio_real .and. vartype /= pio_double) then
            cycle
         end if
      case default
         cycle
      end select

      ! Check if the variable is TK"R" compatible between MPAS and the file.
      ierr = pio_inq_varndims(pio_file, varid, varndims)

      if (ierr /= pio_noerr) then
         cycle
      end if

      if (varndims /= var_info % rank) then
         cycle
      end if

      var_is_tkr_compatible(i) = .true.
   end do

   call mpas_log_write('var_name_list = ' // stringify(var_name_list))
   call mpas_log_write('var_is_present = ' // stringify(var_is_present))
   call mpas_log_write('var_is_tkr_compatible = ' // stringify(var_is_tkr_compatible))

   call mpas_log_write(subname // ' completed')
end subroutine dyn_mpas_check_variable_status

!> ########################################################################################
!>
!> \brief Computes local unit north, east, and edge-normal vectors
!> \author Michael Duda
!> \date 15 January 2020
!> \details
!> This routine computes the local unit north and east vectors at all cell
!> centers, storing the resulting fields in the mesh pool as 'north' and
!> 'east'. It also computes the edge-normal unit vectors by calling
!> the mpas_initialize_vectors routine. Before this routine is called,
!> the mesh pool must contain 'latCell' and 'lonCell' fields that are valid
!> for all cells (not just solve cells), plus any fields that are required
!> by the mpas_initialize_vectors routine.
!>
!> \update: Dustin Swales April 2025 - Modified for use in UWM
!>
!> ########################################################################################
subroutine ufs_mpas_compute_unit_vectors()
   use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_dimension, mpas_pool_get_array
   use mpas_derived_types, only : mpas_pool_type
   use mpas_kind_types, only : RKIND
   use mpas_vector_operations, only : mpas_initialize_vectors

   type (mpas_pool_type), pointer :: meshPool
   real(kind=RKIND), dimension(:), pointer :: latCell, lonCell
   real(kind=RKIND), dimension(:,:), pointer :: east, north
   integer, pointer :: nCells
   integer :: iCell

   ! Look up the mesh fields that this routine reads ('latCell', 'lonCell')
   ! and writes ('east', 'north').
   call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'mesh', meshPool)
   call mpas_pool_get_dimension(meshPool, 'nCells', nCells)
   call mpas_pool_get_array(meshPool, 'latCell', latCell)
   call mpas_pool_get_array(meshPool, 'lonCell', lonCell)
   call mpas_pool_get_array(meshPool, 'east', east)
   call mpas_pool_get_array(meshPool, 'north', north)

   do iCell = 1, nCells
      ! Local east vector: tangent to the sphere, along increasing longitude;
      ! normalized to unit length.
      east(1,iCell) = -sin(lonCell(iCell))
      east(2,iCell) = cos(lonCell(iCell))
      east(3,iCell) = 0.0_RKIND

      east(1:3,iCell) = east(1:3,iCell) / sqrt(sum(east(1:3,iCell) * east(1:3,iCell)))

      ! Local north vector: tangent to the sphere, along increasing latitude;
      ! normalized to unit length.
      north(1,iCell) = -cos(lonCell(iCell))*sin(latCell(iCell))
      north(2,iCell) = -sin(lonCell(iCell))*sin(latCell(iCell))
      north(3,iCell) = cos(latCell(iCell))

      north(1:3,iCell) = north(1:3,iCell) / sqrt(sum(north(1:3,iCell) * north(1:3,iCell)))

   end do

   ! Derive the edge-normal unit vectors from the cell-centered fields.
   call mpas_initialize_vectors(meshPool)

end subroutine ufs_mpas_compute_unit_vectors

!> ########################################################################################
!>
!> \brief Define the names of constituents at run-time
!> \author Michael Duda
!> \date 21 May 2020
!> \details
!> Given an array of constituent names, which must have size equal to the number
!> of scalars that were set in the call to ufs_mpas_init_phase1, and given
!> a function to identify which scalars are moisture species, this routine defines
!> scalar constituents for the MPAS-A dycore.
!> Because the MPAS-A dycore expects all moisture constituents to appear in
!> a contiguous range of constituent indices, this routine may in general need
!> to reorder the constituents; to allow for mapping of indices between UFS
!> physics and the MPAS-A dycore, this routine returns index mapping arrays
!> mpas_from_ufs_cnst and ufs_from_mpas_cnst.
!>
!> \update: Dustin Swales April 2025 - Modified for use in UWM
!>
!> ########################################################################################
subroutine ufs_mpas_define_scalars(mpas_from_ufs_cnst, ufs_from_mpas_cnst, ierr)
   use mpas_derived_types, only : mpas_pool_type, field3dReal
   use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_field, &
                                  mpas_pool_get_dimension, mpas_pool_add_dimension
   use mpas_attlist, only : mpas_add_att
   use mpas_log, only : mpas_log_write
   use mpas_derived_types, only : MPAS_LOG_ERR
   ! FMS
   use mpp_mod, only : FATAL, mpp_error

   ! Arguments
   integer, dimension(:), pointer :: mpas_from_ufs_cnst, ufs_from_mpas_cnst
   integer, intent(out) :: ierr

   !
Local variables + character(len=*), parameter :: subname = 'ufs_mpas_subdriver::ufs_mpas_define_scalars' + integer :: i, j, timeLevs + integer, pointer :: num_scalars + integer :: num_moist + integer :: idx_passive + type (mpas_pool_type), pointer :: statePool + type (mpas_pool_type), pointer :: tendPool + type (field3dReal), pointer :: scalarsField + character(len=128) :: tempstr + character :: moisture_char + + ierr = 0 + + ! + ! Define scalars + ! + nullify(statePool) + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', statePool) + + if (.not. associated(statePool)) then + call mpas_log_write(trim(subname)//': ERROR: The ''state'' pool was not found.', & + messageType=MPAS_LOG_ERR) + ierr = 1 + return + end if + + nullify(num_scalars) + call mpas_pool_get_dimension(statePool, 'num_scalars', num_scalars) + + ! + ! The num_scalars dimension should have been defined by atm_core_interface::atm_allocate_scalars, and + ! if this dimension does not exist, something has gone wrong + ! + if (.not. associated(num_scalars)) then + call mpas_log_write(trim(subname)//': ERROR: The ''num_scalars'' dimension does not exist in the ''state'' pool.', & + messageType=MPAS_LOG_ERR) + ierr = 1 + return + end if + + ! + ! If at runtime there are not num_scalars names in the array of constituent names provided by UFS, + ! something has gone wrong + ! + if (size(constituent_name) /= num_scalars) then + call mpas_log_write(trim(subname)//': ERROR: The number of constituent names is not equal to the num_scalars dimension', & + messageType=MPAS_LOG_ERR) + call mpas_log_write('size(constituent_name) = $i, num_scalars = $i', intArgs=[size(constituent_name), num_scalars], & + messageType=MPAS_LOG_ERR) + ierr = 1 + return + end if + + ! + ! In UFS, the first scalar (if there are any) is always sphum (specific humidity); if this is not + ! the case, something has gone wrong + ! 
+ if (size(constituent_name) > 0) then + if (trim(constituent_name(1)) /= 'sphum') then + call mpas_log_write(trim(subname)//': ERROR: The first constituent is not sphum', messageType=MPAS_LOG_ERR) + ierr = 1 + return + end if + end if + + ! + ! Determine which of the constituents are moisture species + ! + allocate(mpas_from_ufs_cnst(num_scalars), stat=ierr) + if( ierr /= 0 ) call mpp_error(FATAL,subname//':failed to allocate mpas_from_ufs_cnst array') + mpas_from_ufs_cnst(:) = 0 + num_moist = 0 + do i = 1, size(constituent_name) + if (is_water_species(i)) then + num_moist = num_moist + 1 + mpas_from_ufs_cnst(num_moist) = i + end if + end do + + ! + ! If UFS has no scalars, let the only scalar in MPAS be 'qv' (a moisture species) + ! + if (num_scalars == 1 .and. size(constituent_name) == 0) then + num_moist = 1 + end if + + ! + ! Assign non-moisture constituents to mpas_from_ufs_cnst(num_moist+1:size(constituent_name)) + ! + idx_passive = num_moist + 1 + do i = 1, size(constituent_name) + + ! If UFS constituent i is not already mapped as a moist constituent + if (.not. is_water_species(i)) then + mpas_from_ufs_cnst(idx_passive) = i + idx_passive = idx_passive + 1 + end if + end do + + ! + ! Create inverse map, ufs_from_mpas_cnst + ! + allocate(ufs_from_mpas_cnst(num_scalars), stat=ierr) + if( ierr /= 0 ) call mpp_error(FATAL,subname//':failed to allocate ufs_from_mpas_cnst array') + ufs_from_mpas_cnst(:) = 0 + + do i = 1, size(constituent_name) + ufs_from_mpas_cnst(mpas_from_ufs_cnst(i)) = i + end do + + timeLevs = 2 + + do i = 1, timeLevs + nullify(scalarsField) + call mpas_pool_get_field(statePool, 'scalars', scalarsField, timeLevel=i) + + if (.not. 
associated(scalarsField)) then + call mpas_log_write(trim(subname)//': ERROR: The ''scalars'' field was not found in the ''state'' pool', & + messageType=MPAS_LOG_ERR) + ierr = 1 + return + end if + + if (i == 1) call mpas_pool_add_dimension(statePool, 'index_qv', 1) + scalarsField % constituentNames(1) = 'qv' + call mpas_add_att(scalarsField % attLists(1) % attList, 'units', 'kg kg^{-1}') + call mpas_add_att(scalarsField % attLists(1) % attList, 'long_name', 'Water vapor mixing ratio') + + do j = 2, size(constituent_name) + scalarsField % constituentNames(j) = trim(constituent_name(mpas_from_ufs_cnst(j))) + end do + + end do + + call mpas_pool_add_dimension(statePool, 'moist_start', 1) + call mpas_pool_add_dimension(statePool, 'moist_end', num_moist) + + ! + ! Print a tabular summary of the mapping between constituent indices + ! + call mpas_log_write('') + call mpas_log_write(' i MPAS constituent mpas_from_ufs_cnst(i) i UFS constituent ufs_from_mpas_cnst(i)') + call mpas_log_write('------------------------------------------ ------------------------------------------') + do i = 1, min(num_scalars, size(constituent_name)) + if (i <= num_moist) then + moisture_char = '*' + else + moisture_char = ' ' + end if + write(tempstr, '(i3,1x,a16,1x,i18,8x,i3,1x,a16,1x,i18)') i, trim(scalarsField % constituentNames(i))//moisture_char, & + mpas_from_ufs_cnst(i), & + i, trim(constituent_name(i)), & + ufs_from_mpas_cnst(i) + call mpas_log_write(trim(tempstr)) + end do + call mpas_log_write('------------------------------------------ ------------------------------------------') + call mpas_log_write('* = constituent used as a moisture species in MPAS-A dycore') + call mpas_log_write('') + + + ! + ! Define scalars_tend + ! + nullify(tendPool) + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'tend', tendPool) + + if (.not. 
associated(tendPool)) then + call mpas_log_write(trim(subname)//': ERROR: The ''tend'' pool was not found.', & + messageType=MPAS_LOG_ERR) + ierr = 1 + return + end if + + timeLevs = 1 + + do i = 1, timeLevs + nullify(scalarsField) + call mpas_pool_get_field(tendPool, 'scalars_tend', scalarsField, timeLevel=i) + + if (.not. associated(scalarsField)) then + call mpas_log_write(trim(subname)//': ERROR: The ''scalars_tend'' field was not found in the ''tend'' pool', & + messageType=MPAS_LOG_ERR) + ierr = 1 + return + end if + + if (i == 1) call mpas_pool_add_dimension(tendPool, 'index_qv', 1) + scalarsField % constituentNames(1) = 'tend_qv' + call mpas_add_att(scalarsField % attLists(1) % attList, 'units', 'kg m^{-3} s^{-1}') + call mpas_add_att(scalarsField % attLists(1) % attList, 'long_name', 'Tendency of water vapor mixing ratio') + + do j = 2, size(constituent_name) + scalarsField % constituentNames(j) = 'tend_'//trim(constituent_name(mpas_from_ufs_cnst(j))) + end do + end do + + call mpas_pool_add_dimension(tendPool, 'moist_start', 1) + call mpas_pool_add_dimension(tendPool, 'moist_end', num_moist) + + end subroutine ufs_mpas_define_scalars + + !> ######################################################################################## + !> + !> \brief Returns global mesh dimensions + !> \author Michael Duda + !> \date 22 August 2019 + !> \details + !> This routine returns on all tasks the number of global cells, edges, + !> vertices, maxEdges, vertical layers, and the maximum number of cells owned by any task. 
+ !> + !> \update: Dustin Swales April 2025 - Modified for use in UWM + !> + !> ######################################################################################## + subroutine ufs_mpas_get_global_dims(nCellsGlobal, nEdgesGlobal, nVerticesGlobal, maxEdges,& + nVertLevels, maxNCells) + use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_dimension + use mpas_derived_types, only : mpas_pool_type + use mpas_dmpar, only : mpas_dmpar_sum_int, mpas_dmpar_max_int + + integer, intent(out) :: nCellsGlobal + integer, intent(out) :: nEdgesGlobal + integer, intent(out) :: nVerticesGlobal + integer, intent(out) :: maxEdges + integer, intent(out) :: nVertLevels + integer, intent(out) :: maxNCells + + integer, pointer :: nCellsSolve + integer, pointer :: nEdgesSolve + integer, pointer :: nVerticesSolve + integer, pointer :: maxEdgesLocal + integer, pointer :: nVertLevelsLocal + + type (mpas_pool_type), pointer :: meshPool + + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'mesh', meshPool) + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_dimension(meshPool, 'nEdgesSolve', nEdgesSolve) + call mpas_pool_get_dimension(meshPool, 'nVerticesSolve', nVerticesSolve) + call mpas_pool_get_dimension(meshPool, 'maxEdges', maxEdgesLocal) + call mpas_pool_get_dimension(meshPool, 'nVertLevels', nVertLevelsLocal) + + call mpas_dmpar_sum_int(domain_ptr % dminfo, nCellsSolve, nCellsGlobal) + call mpas_dmpar_sum_int(domain_ptr % dminfo, nEdgesSolve, nEdgesGlobal) + call mpas_dmpar_sum_int(domain_ptr % dminfo, nVerticesSolve, nVerticesGlobal) + + maxEdges = maxEdgesLocal + nVertLevels = nVertLevelsLocal + + call mpas_dmpar_max_int(domain_ptr % dminfo, nCellsSolve, maxNCells) + + end subroutine ufs_mpas_get_global_dims + + !> ######################################################################################## + !> + !> \brief Returns global coordinate arrays + !> \author Michael Duda + !> \date 22 August 2019 + !> \details 
+ !> This routine returns on all tasks arrays of latitude, longitude, and cell + !> area for all (global) cells. + !> + !> It is assumed that latCellGlobal, lonCellGlobal, and areaCellGlobal have + !> been allocated by the caller with a size equal to the global number of + !> cells in the mesh. + !> + !> \update: Dustin Swales April 2025 - Modified for use in UWM + !> + !> ######################################################################################## + subroutine ufs_mpas_get_global_coords(latCellGlobal, lonCellGlobal, areaCellGlobal) + use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_dimension, mpas_pool_get_array + use mpas_derived_types, only : mpas_pool_type + use mpas_kind_types, only : RKIND + use mpas_dmpar, only : mpas_dmpar_sum_int, mpas_dmpar_max_real_array + use mpp_mod, only : FATAL, mpp_error + real (kind=RKIND), dimension(:), intent(out) :: latCellGlobal + real (kind=RKIND), dimension(:), intent(out) :: lonCellGlobal + real (kind=RKIND), dimension(:), intent(out) :: areaCellGlobal + + integer :: iCell + + integer, pointer :: nCellsSolve + integer, dimension(:), pointer :: indexToCellID + + type (mpas_pool_type), pointer :: meshPool + integer :: nCellsGlobal,ierr + + real (kind=RKIND), dimension(:), pointer :: latCell + real (kind=RKIND), dimension(:), pointer :: lonCell + real (kind=RKIND), dimension(:), pointer :: areaCell + real (kind=RKIND), dimension(:), pointer :: temp + + character(len=*), parameter :: subname = 'ufs_mpas_subdriver::ufs_mpas_get_global_coords' + + + call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'mesh', meshPool) + call mpas_pool_get_dimension(meshPool, 'nCellsSolve', nCellsSolve) + call mpas_pool_get_array(meshPool, 'indexToCellID', indexToCellID) + call mpas_pool_get_array(meshPool, 'latCell', latCell) + call mpas_pool_get_array(meshPool, 'lonCell', lonCell) + call mpas_pool_get_array(meshPool, 'areaCell', areaCell) + + call mpas_dmpar_sum_int(domain_ptr % dminfo, nCellsSolve, 
nCellsGlobal) + + ! check: size(latCellGlobal) ?= nCellsGlobal + allocate(temp(nCellsGlobal), stat=ierr) + if( ierr /= 0 ) call mpp_error(FATAL,subname//':failed to allocate temp array') + + ! + ! latCellGlobal + ! + temp(:) = -huge(temp(0)) + do iCell=1,nCellsSolve + temp(indexToCellID(iCell)) = latCell(iCell) + end do + + call mpas_dmpar_max_real_array(domain_ptr % dminfo, nCellsGlobal, temp, latCellGlobal) + + ! + ! lonCellGlobal + ! + temp(:) = -huge(temp(0)) + do iCell=1,nCellsSolve + temp(indexToCellID(iCell)) = lonCell(iCell) + end do + + call mpas_dmpar_max_real_array(domain_ptr % dminfo, nCellsGlobal, temp, lonCellGlobal) + + ! + ! areaCellGlobal + ! + temp(:) = -huge(temp(0)) + do iCell=1,nCellsSolve + temp(indexToCellID(iCell)) = areaCell(iCell) + end do + + call mpas_dmpar_max_real_array(domain_ptr % dminfo, nCellsGlobal, temp, areaCellGlobal) + + deallocate(temp) + + end subroutine ufs_mpas_get_global_coords + + ! ########################################################################################## + ! + ! ########################################################################################## + character(len=10) function date2yyyymmdd (date) + ! Input arguments + integer, intent(in) :: date + + ! Local workspace + integer :: year ! year of yyyy-mm-dd + integer :: month ! month of yyyy-mm-dd + integer :: day ! day of yyyy-mm-dd + + year = date / 10000 + month = (date - year*10000) / 100 + day = date - year*10000 - month*100 + + write(date2yyyymmdd,80) year, month, day +80 format(i4.4,'-',i2.2,'-',i2.2) + + end function date2yyyymmdd + ! ######################################################################################### + ! + ! ######################################################################################### + character(len=8) function sec2hms (seconds) + ! Input arguments + integer, intent(in) :: seconds + + ! Local workspace + integer :: hours ! hours of hh:mm:ss + integer :: minutes ! minutes of hh:mm:ss + integer :: secs ! 
seconds of hh:mm:ss + + hours = seconds / 3600 + minutes = (seconds - hours*3600) / 60 + secs = (seconds - hours*3600 - minutes*60) + + write(sec2hms,80) hours, minutes, secs +80 format(i2.2,':',i2.2,':',i2.2) + + end function sec2hms + + ! ######################################################################################### + ! + ! ######################################################################################### + character(len=10) function int2str(n) + ! return default integer as a left justified string + ! arguments + integer, intent(in) :: n + + write(int2str,'(i0)') n + + end function int2str + + character(len=10) function log2str(n) + ! return default integer as a left justified string + ! arguments + logical, intent(in) :: n + + if (n) then + write(log2str,'(a4)') 'TRUE' + else + write(log2str,'(a4)') 'FALSE' + endif + + end function log2str + +end module ufs_mpas_subdriver diff --git a/tests/test_fv3_cap.F90 b/tests/test_fv3_cap.F90 index 81e944b0a4..50ed973af9 100644 --- a/tests/test_fv3_cap.F90 +++ b/tests/test_fv3_cap.F90 @@ -1,5 +1,5 @@ program test_output_hours - use fv3atm_cap_mod, only: OutputHours_FrequencyInput, OutputHours_ArrayInput + use ufsatm_cap_mod, only: OutputHours_FrequencyInput, OutputHours_ArrayInput use module_fv3_config, only: dt_atmos, output_fh use module_fv3_io_def, only: lflname_fulltime diff --git a/tests/test_post_nems_routines.F90 b/tests/test_post_nems_routines.F90 index 371c3bc8dd..7f43e079fd 100644 --- a/tests/test_post_nems_routines.F90 +++ b/tests/test_post_nems_routines.F90 @@ -5,6 +5,7 @@ program test_post_nems_routines use ctlblk_mod, only : komax,hyb_sigp,d3d_on,gocart_on, & rdaod,nasa_on,gccpp_on,d2d_chem,modelname,submodelname, lsm + use post_nems_routines, only : read_postnmlt implicit none diff --git a/fv3/fv3_cap.F90 b/ufsatm_cap.F90 similarity index 81% rename from fv3/fv3_cap.F90 rename to ufsatm_cap.F90 index 0c1cc754b5..a2e7ed727f 100644 --- a/fv3/fv3_cap.F90 +++ b/ufsatm_cap.F90 @@ -1,6 +1,6 @@ 
-!--------------- FV3 ATM solo model ---------------- +!--------------- UFS ATM solo model ---------------- ! -!*** The FV3 atmosphere grid component nuopc cap +!*** The UFS ATMosphere grid component nuopc cap ! ! Author: Jun Wang@noaa.gov ! @@ -10,9 +10,10 @@ ! 24 Jul 2017: J. Wang initialization and time stepping changes for coupling ! 02 Nov 2017: J. Wang Use Gerhard's transferable RouteHandle ! 20 May 2025: D. Sarmiento Handle output hour array in seperate subroutines -! +! 06 Jun 2025: D. Swales Generalization for MPAS dynamical core +! -module fv3atm_cap_mod +module ufsatm_cap_mod use ESMF use NUOPC @@ -28,11 +29,20 @@ module fv3atm_cap_mod label_Finalize, & NUOPC_ModelGet ! +#ifdef FV3 use module_fv3_config, only: quilting, quilting_restart, output_fh, & dt_atmos, & calendar, cpl_grid_id, & cplprint_flag, first_kdt - +#endif +#ifdef MPAS + use module_mpas_config, only: output_fh, dt_atmos, calendar, & + fcst_mpi_comm, pio_ioformat, pio_iotype, & + pio_subsystem, pio_stride, & + pio_numiotasks, pio_iodesc, cpl_grid_id, & + cplprint_flag, first_kdt, quilting, & + quilting_restart +#endif use module_fv3_io_def, only: num_pes_fcst,write_groups, & num_files, filename_base, & wrttasks_per_group, n_group, & @@ -42,7 +52,9 @@ module fv3atm_cap_mod ! use module_fcst_grid_comp, only: fcstSS => SetServices - use module_wrt_grid_comp, only: wrtSS => SetServices + use module_wrt_grid_comp, only: wrtSS => SetServices, & + dstOutsideMaskValue, & + generate_dst_field_mask, add_dst_mask ! 
use module_cplfields, only: importFieldsValid, queryImportFields @@ -89,14 +101,14 @@ module fv3atm_cap_mod contains !----------------------------------------------------------------------- -!------------------- Solo fv3atm code starts here ---------------------- +!------------------- Solo ufsatm code starts here ---------------------- !----------------------------------------------------------------------- subroutine SetServices(gcomp, rc) type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc - character(len=*),parameter :: subname='(fv3atm_cap:SetServices)' + character(len=*),parameter :: subname='(ufsatm_cap:SetServices)' rc = ESMF_SUCCESS @@ -117,13 +129,14 @@ subroutine SetServices(gcomp, rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! checking the import fields is a bit more complex because of coldstart option +#ifdef FV3 call ESMF_MethodRemove(gcomp, label_CheckImport, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call NUOPC_CompSpecialize(gcomp, specLabel=label_CheckImport, & - specRoutine=fv3_checkimport, rc=rc) + specRoutine=ufsatm_checkimport, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - +#endif ! setup Run/Advance phase: phase1 call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_RUN, & phaseLabelList=(/"phase1"/), userRoutine=routine_Run, rc=rc) @@ -132,7 +145,7 @@ subroutine SetServices(gcomp, rc) call NUOPC_CompSpecialize(gcomp, specLabel=label_Advance, & specPhaseLabel="phase1", specRoutine=ModelAdvance_phase1, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - +#ifdef FV3 ! 
setup Run/Advance phase: phase2 call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_RUN, & phaseLabelList=(/"phase2"/), userRoutine=routine_Run, rc=rc) @@ -142,7 +155,7 @@ subroutine SetServices(gcomp, rc) specPhaseLabel="phase2", specRoutine=ModelAdvance_phase2, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - ! specializations to set fv3 cap run clock (model clock) + ! specializations to set ufsatm cap run clock (model clock) call ESMF_MethodRemove(gcomp, label=label_SetRunClock, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return @@ -152,7 +165,7 @@ subroutine SetServices(gcomp, rc) ! specializations required to support 'inline' run sequences call NUOPC_CompSpecialize(gcomp, specLabel=label_CheckImport, & - specPhaseLabel="phase1", specRoutine=fv3_checkimport, rc=rc) + specPhaseLabel="phase1", specRoutine=ufsatm_checkimport, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call NUOPC_CompSpecialize(gcomp, specLabel=label_TimestampExport, & @@ -162,7 +175,7 @@ subroutine SetServices(gcomp, rc) call NUOPC_CompSpecialize(gcomp, specLabel=label_CheckImport, & specPhaseLabel="phase2", specRoutine=NUOPC_NoOp, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - +#endif ! 
model finalize method(s) call NUOPC_CompSpecialize(gcomp, specLabel=label_Finalize, & specRoutine=ModelFinalize, rc=rc) @@ -173,7 +186,14 @@ end subroutine SetServices !----------------------------------------------------------------------------- subroutine InitializeAdvertise(gcomp, rc) - +#ifdef MPAS + use pio, only: pio_init, pio_setdebuglevel + use pio, only: PIO_REARR_BOX, PIO_REARR_SUBSET + use pio, only: PIO_64BIT_OFFSET, PIO_64BIT_DATA + use pio, only: PIO_IOTYPE_NETCDF, PIO_IOTYPE_PNETCDF + use pio, only: PIO_IOTYPE_NETCDF4C, PIO_IOTYPE_NETCDF4P +#endif + use mpi_f08, only: MPI_Wtime type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc @@ -212,8 +232,8 @@ subroutine InitializeAdvertise(gcomp, rc) type(ESMF_FieldBundle) :: mirrorFB type(ESMF_Field), allocatable :: fieldList(:) - character(len=*),parameter :: subname='(fv3_cap:InitializeAdvertise)' - real(kind=8) :: MPI_Wtime, timeis, timerhs + character(len=*),parameter :: subname='(ufsatm_cap:InitializeAdvertise)' + real(kind=8) :: timeis, timerhs, time_rh_fb_start, time_rh_start integer :: wrttasks_per_group_from_parent, wrtLocalPet, num_threads character(len=64) :: rh_filename @@ -224,6 +244,15 @@ subroutine InitializeAdvertise(gcomp, rc) type(ESMF_StaggerLoc) :: staggerloc character(len=20) :: cvalue character(ESMF_MAXSTR) :: output_grid + ! PIO + integer :: pio_root + integer :: pio_rearranger + integer :: pio_debug_level + logical :: needs_dst_mask + logical :: top_parent_is_global + integer :: ngrids + type(ESMF_Grid) :: src_grid, dst_grid + type(ESMF_Field), allocatable :: dst_field_mask(:) ! !------------------------------------------------------------------------ ! 
@@ -232,9 +261,15 @@ subroutine InitializeAdvertise(gcomp, rc) call ESMF_GridCompGet(gcomp, name=gc_name, vm=vm,rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - +#ifdef FV3 call ESMF_VMGet(vm, petCount=petcount, localpet=mype, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return +#endif +#ifdef MPAS + call ESMF_VMGet(vm=vm, localPet=mype, mpiCommunicator=fcst_mpi_comm%mpi_val, & + petCount=petcount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return +#endif ! num_threads is needed to compute actual wrttasks_per_group_from_parent call ESMF_InfoGetFromHost(gcomp, info=info, rc=rc) @@ -245,13 +280,13 @@ subroutine InitializeAdvertise(gcomp, rc) ! query for importState and exportState call NUOPC_ModelGet(gcomp, driverClock=clock, importState=importState, exportState=exportState, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - +#ifdef FV3 call ESMF_AttributeGet(gcomp, name="cpl_grid_id", value=value, defaultValue="1", & convention="NUOPC", purpose="Instance", rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return cpl_grid_id = ESMF_UtilString2Int(value, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - +#endif call ESMF_AttributeGet(gcomp, name="ProfileMemory", value=value, defaultValue="false", & convention="NUOPC", purpose="Instance", rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return @@ -279,6 +314,158 @@ subroutine InitializeAdvertise(gcomp, rc) write(msgString,'(A,i6)') trim(subname)//' dbug = ',dbug call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO) +#ifdef MPAS + ! ####################################################################################### + ! + ! 
PIO + ! + ! ####################################################################################### + ! pio_netcdf_format + call NUOPC_CompAttributeGet(gcomp, name='pio_netcdf_format', value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (isPresent .and. isSet) then + cvalue = ESMF_UtilStringUpperCase(cvalue) + if (trim(cvalue) .eq. 'CLASSIC') then + pio_ioformat = 0 + else if (trim(cvalue) .eq. '64BIT_OFFSET') then + pio_ioformat = PIO_64BIT_OFFSET + else if (trim(cvalue) .eq. '64BIT_DATA') then + pio_ioformat = PIO_64BIT_DATA + else + call ESMF_LogWrite(trim("need to provide valid option for pio_ioformat (CLASSIC|64BIT_OFFSET|64BIT_DATA)"), ESMF_LOGMSG_INFO) + return + end if + else + cvalue = '64BIT_OFFSET' + pio_ioformat = PIO_64BIT_OFFSET + end if + + ! pio_typename + call NUOPC_CompAttributeGet(gcomp, name='pio_typename', value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (isPresent .and. isSet) then + cvalue = ESMF_UtilStringUpperCase(cvalue) + if (trim(cvalue) .eq. 'NETCDF') then + pio_iotype = PIO_IOTYPE_NETCDF + else if (trim(cvalue) .eq. 'PNETCDF') then + pio_iotype = PIO_IOTYPE_PNETCDF + else if (trim(cvalue) .eq. 'NETCDF4C') then + pio_iotype = PIO_IOTYPE_NETCDF4C + else if (trim(cvalue) .eq. 'NETCDF4P') then + pio_iotype = PIO_IOTYPE_NETCDF4P + else + call ESMF_LogWrite(trim("need to provide valid option for pio_typename (NETCDF|PNETCDF|NETCDF4C|NETCDF4P)"), ESMF_LOGMSG_INFO) + return + end if + else + cvalue = 'NETCDF' + pio_iotype = PIO_IOTYPE_NETCDF + end if + + ! pio_root + call NUOPC_CompAttributeGet(gcomp, name='pio_root', value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (isPresent .and. 
isSet) then + read(cvalue,*) pio_root + if (pio_root < 0) then + pio_root = 1 + endif + pio_root = min(pio_root, petCount-1) + else + pio_root = 1 + end if + + ! pio_stride + call NUOPC_CompAttributeGet(gcomp, name='pio_stride', value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (isPresent .and. isSet) then + read(cvalue,*) pio_stride + else + pio_stride = -99 + end if + + ! pio_numiotasks + call NUOPC_CompAttributeGet(gcomp, name='pio_numiotasks', value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (isPresent .and. isSet) then + read(cvalue,*) pio_numiotasks + else + pio_numiotasks = -99 + end if + + ! check for parallel IO, it requires at least two io pes + if (petCount > 1 .and. pio_numiotasks == 1 .and. & + (pio_iotype .eq. PIO_IOTYPE_PNETCDF .or. pio_iotype .eq. PIO_IOTYPE_NETCDF4P)) then + pio_numiotasks = 2 + pio_stride = min(pio_stride, petCount/2) + endif + + if (pio_root + (pio_stride)*(pio_numiotasks-1) >= petCount .or. & + pio_stride <= 0 .or. pio_numiotasks <= 0 .or. pio_root < 0 .or. pio_root > petCount-1) then + if (petCount < 100) then + pio_stride = max(1, petCount/4) + else if(petCount < 1000) then + pio_stride = max(1, petCount/8) + else + pio_stride = max(1, petCount/16) + end if + if(pio_stride > 1) then + pio_numiotasks = petCount/pio_stride + pio_root = min(1, petCount-1) + else + pio_numiotasks = petCount + pio_root = 0 + end if + end if + + ! pio_rearranger + call NUOPC_CompAttributeGet(gcomp, name='pio_rearranger', value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (isPresent .and. isSet) then + cvalue = ESMF_UtilStringUpperCase(cvalue) + if (trim(cvalue) .eq. 
'BOX') then + pio_rearranger = PIO_REARR_BOX + else if (trim(cvalue) .eq. 'SUBSET') then + pio_rearranger = PIO_REARR_SUBSET + else + call ESMF_LogWrite(trim("need to provide valid option for pio_rearranger (BOX|SUBSET)"), ESMF_LOGMSG_INFO) + return + end if + else + cvalue = 'SUBSET' + pio_rearranger = PIO_REARR_SUBSET + end if + + ! Initialize PIO + allocate(pio_subsystem) + call pio_init(mype, fcst_mpi_comm%mpi_val, pio_numiotasks, 0, pio_stride, pio_rearranger, pio_subsystem, base=pio_root) + + ! PIO debug related options + ! pio_debug_level + call NUOPC_CompAttributeGet(gcomp, name='pio_debug_level', value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (isPresent .and. isSet) then + read(cvalue,*) pio_debug_level + if (pio_debug_level < 0 .or. pio_debug_level > 6) then + call ESMF_LogWrite(trim("MPAS_NUOPC_CAP: need to provide valid option for pio_debug_level (0-6)"), ESMF_LOGMSG_INFO) + return + end if + else + pio_debug_level = 0 + end if + + ! set PIO debug level + call pio_setdebuglevel(pio_debug_level) + +#endif + ! set cpl_scalars from config. Default to null values for standalone flds_scalar_name = '' flds_scalar_num = 0 @@ -419,7 +606,7 @@ subroutine InitializeAdvertise(gcomp, rc) ! set up fcst grid component ! !---------------------------------------------------------------------- -!*** create fv3 atm tasks and quilt servers +!*** create ufsatm tasks and quilt servers !----------------------------------------------------------------------- ! ! create fcst grid component @@ -434,10 +621,10 @@ subroutine InitializeAdvertise(gcomp, rc) do j=1, num_pes_fcst fcstPetList(j) = j - 1 enddo - fcstComp = ESMF_GridCompCreate(petList=fcstPetList, name='fv3_fcst', rc=rc) + fcstComp = ESMF_GridCompCreate(petList=fcstPetList, name='ufsatm_fcst', rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! 
- ! copy attributes from fv3cap component to fcstComp + ! copy attributes from ufscap component to fcstComp call ESMF_InfoGetFromHost(gcomp, info=parentInfo, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_InfoGetFromHost(fcstComp, info=childInfo, rc=rc) @@ -471,12 +658,13 @@ subroutine InitializeAdvertise(gcomp, rc) ! determine number elements in fcstState call ESMF_StateGet(fcstState, itemCount=FBCount, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - if(mype == 0) print *,'fv3_cap: field bundles in fcstComp export state, FBCount= ',FBcount + if(mype == 0) print *,'ufsatm_cap: field bundles in fcstComp export state, FBCount= ',FBcount ! ! set start time for output output_startfh = 0. ! ! query the is_moving array from the fcstState (was set by fcstComp.Initialize() above) +#ifdef FV3 call ESMF_InfoGetFromHost(fcstState, info=info, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_InfoGetAlloc(info, key="is_moving", values=is_moving, rc=rc) @@ -494,7 +682,7 @@ subroutine InitializeAdvertise(gcomp, rc) write(msgString,'(A,8L4)') trim(subname)//" is_moving = ", is_moving call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - +#endif ! !----------------------------------------------------------------------- !*** create and initialize Write component(s). @@ -563,7 +751,7 @@ subroutine InitializeAdvertise(gcomp, rc) ! print *,'af wrtComp(i)=',i,'name=',trim(cwrtcomp),'rc=',rc if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return -! copy attributes from fv3cap component to wrtComp +! 
copy attributes from ufsatm_cap component to wrtComp call ESMF_InfoGetFromHost(wrtComp(i), info=childInfo, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_InfoUpdate(lhs=childInfo, rhs=parentInfo, rc=rc) @@ -729,8 +917,75 @@ subroutine InitializeAdvertise(gcomp, rc) endif + call ESMF_AttributeGet(wrtState(i), convention="NetCDF", purpose="FV3", & + name="ngrids", value=ngrids, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_AttributeGet(wrtState(i), convention="NetCDF", purpose="FV3", & + name="top_parent_is_global", value=top_parent_is_global, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + allocate(dst_field_mask(ngrids)) + ! loop over all FieldBundle in the states and precompute Regrid operation + if (mype == 0) print*, 'computing regrid/redist routehandles' + time_rh_start = MPI_Wtime() do j=1, FBcount + time_rh_fb_start = MPI_Wtime() + + ! Destination grid mask needs to be created only for: + ! 1) regional (non global) forecast (source) grid + ! 2) non-moving forecast grids, moving forecast grid will be remapped in the write grid component + ! 
3) non-native grid (non cubed_sphere_grid) history bundles + call ESMF_AttributeGet(fcstFB(j), convention="NetCDF", purpose="FV3", name="grid_id", value=grid_id, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_StateGet(wrtState(i), itemName="output_"//trim(fcstItemNameList(j)), fieldbundle=wrtFB(j,i), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + + call ESMF_AttributeGet(wrtFB(j,i), convention="NetCDF", purpose="FV3-nooutput", & + name="output_grid", value=output_grid, isPresent=isPresent, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + needs_dst_mask = .TRUE. + needs_dst_mask = needs_dst_mask .AND. .not. (grid_id == 1 .and. top_parent_is_global) ! 1) regional (non global) forecast (source) grid + needs_dst_mask = needs_dst_mask .AND. .not. is_moving_fb(j) ! 2) non-moving forecast grids + needs_dst_mask = needs_dst_mask .AND. .not. (trim(output_grid) == "restart_grid" .or. trim(output_grid) == "cubed_sphere_grid") ! 3) non-native grid (non cubed_sphere_grid) history bundles + + if (mype == 0) then + write(*,'(A,I2,1X,A32, A,I2, A,A24, A,L2, A,L2 )') ' FB: ',j, fcstItemNameList(j), & + ' grid_id ', grid_id, & + ' output_grid: ', output_grid, & + ' is_moving: ', is_moving_fb(j), & + ' needs_dst_mask: ', needs_dst_mask + endif + + ! only on write group 1, RH's on groups > 1 are computed from RH on group 1 + if (needs_dst_mask .and. i==1) then + + call ESMF_StateGet(wrtState(i), itemName="output_"//trim(fcstItemNameList(j)), fieldbundle=wrtFB(j,i), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_FieldBundleGet(wrtFB(j,i), grid=dst_grid, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + if (.not. 
ESMF_FieldIsCreated(dst_field_mask(grid_id))) then + if (mype == 0) print *, ' generate destination mask for grid ', grid_id + call ESMF_FieldBundleGet(fcstFB(j), grid=src_grid, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call generate_dst_field_mask(src_grid, dst_grid, dst_field_mask(grid_id), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + else + if (mype == 0) print *, ' use already generated destination mask for grid ', grid_id + endif + + call add_dst_mask(dst_grid, dst_field_mask(grid_id), dstOutsideMaskValue, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + end if ! needs_dst_mask .and. i==1 + ! decide between Redist() and Regrid() if (is_moving_fb(j)) then ! this is a moving domain -> use a static Redist() to move data to wrtComp(:) @@ -763,7 +1018,7 @@ subroutine InitializeAdvertise(gcomp, rc) call ESMF_StateGet(wrtState(i), & itemName="output_"//trim(fcstItemNameList(j)), & fieldbundle=wrtFB(j,i), rc=rc) - if(mype == 0) print *,'af get wrtfb=',"output_"//trim(fcstItemNameList(j)),' rc=',rc + ! if(mype == 0) print *,'af get wrtfb=',"output_"//trim(fcstItemNameList(j)),' rc=',rc if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_AttributeGet(wrtFB(j,i), convention="NetCDF", purpose="FV3-nooutput", & @@ -805,7 +1060,7 @@ subroutine InitializeAdvertise(gcomp, rc) inquire(FILE=trim(rh_filename), EXIST=rh_file_exist) if (rh_file_exist .and. 
use_saved_routehandles) then - if(mype==0) print *,'in fv3cap init, routehandle file ',trim(rh_filename), ' exists' + if(mype==0) print *,'in ufsatm_cap init, routehandle file ',trim(rh_filename), ' exists' write(msgString,*) "Calling into ESMF_RouteHandleCreate(from file)...", trim(rh_filename) call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO, rc=rc) @@ -830,7 +1085,7 @@ subroutine InitializeAdvertise(gcomp, rc) routehandle=routehandle(j,1), & rc=rc) if (rc /= ESMF_SUCCESS) then - call ESMF_LogWrite('fv3_cap.F90:InitializeAdvertise error in ESMF_FieldBundleRedistStore', ESMF_LOGMSG_ERROR, rc=rc) + call ESMF_LogWrite('ufsatm_cap.F90:InitializeAdvertise error in ESMF_FieldBundleRedistStore', ESMF_LOGMSG_ERROR, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! call ESMF_Finalize(endflag=ESMF_END_ABORT) endif @@ -846,11 +1101,12 @@ subroutine InitializeAdvertise(gcomp, rc) call ESMF_TraceRegionEnter("ESMF_FieldBundleRegridStore()", rc=rc) call ESMF_FieldBundleRegridStore(fcstFB(j), wrtFB(j,1), & + dstMaskValues=(/dstOutsideMaskValue/), & regridMethod=regridmethod, routehandle=routehandle(j,1), & unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, & srcTermProcessing=isrcTermProcessing, rc=rc) if (rc /= ESMF_SUCCESS) then - call ESMF_LogWrite('fv3_cap.F90:InitializeAdvertise error in ESMF_FieldBundleRegridStore', ESMF_LOGMSG_ERROR, rc=rc) + call ESMF_LogWrite('ufsatm_cap.F90:InitializeAdvertise error in ESMF_FieldBundleRegridStore', ESMF_LOGMSG_ERROR, rc=rc) call ESMF_Finalize(endflag=ESMF_END_ABORT) endif call ESMF_TraceRegionExit("ESMF_FieldBundleRegridStore()", rc=rc) @@ -869,7 +1125,7 @@ subroutine InitializeAdvertise(gcomp, rc) call ESMF_RouteHandleWrite(routehandle(j,1), fileName=trim(rh_filename), rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_TraceRegionExit("ESMF_RouteHandleWrite()", rc=rc) - if(mype==0) print *,'in fv3cap init, saved 
routehandle file ',trim(rh_filename) + if(mype==0) print *,'in ufsatm_cap init, saved routehandle file ',trim(rh_filename) write(msgString,*) "... returned from ESMF_RouteHandleWrite." call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO, rc=rc) @@ -902,11 +1158,24 @@ subroutine InitializeAdvertise(gcomp, rc) write(msgString,"(A,I2.2,',',I2.2,A)") "... returned from RH creation for wrtFB(",j,i, ")." call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO, rc=rc) endif + + if (mype == 0) write(*,'(A,I2,F12.6)') ' done computing routehandle for field bundle: ',j,MPI_Wtime()-time_rh_fb_start enddo ! j=1, FBcount + if (mype == 0) write(*,'(A,F12.6)') ' done computing all routehandles: ',MPI_Wtime()-time_rh_start + + if (allocated(dst_field_mask)) then + do ii=1,ngrids + if (ESMF_FieldIsCreated(dst_field_mask(ii))) then + call ESMF_FieldDestroy(dst_field_mask(ii), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + endif + end do + deallocate(dst_field_mask) + endif ! end write_groups enddo ! i=1, write_groups - if(mype==0) print *,'in fv3cap init, time wrtcrt/regrdst',MPI_Wtime()-timerhs + if(mype==0) print *,'in ufsatm_cap init, time wrtcrt/regrdst',MPI_Wtime()-timerhs deallocate(petList) deallocate(originPetList) deallocate(targetPetList) @@ -918,7 +1187,7 @@ subroutine InitializeAdvertise(gcomp, rc) if(iau_offset > 0) then output_startfh = iau_offset endif - if(mype==0) print *,'in fv3 cap init, output_startfh=',output_startfh,' iau_offset=',iau_offset + if(mype==0) print *,'in ufsatm cap init, output_startfh=',output_startfh,' iau_offset=',iau_offset ! !----------------------------------------------------------------------- !*** SET THE FIRST WRITE GROUP AS THE FIRST ONE TO ACT. @@ -930,6 +1199,7 @@ subroutine InitializeAdvertise(gcomp, rc) endif ! !-- set up output forecast time if output_fh is specified +#ifdef FV3 if (noutput_fh > 0 ) then !--- use output_fh to sepcify output forecast time loutput_fh = .true. 
@@ -961,7 +1231,7 @@ subroutine InitializeAdvertise(gcomp, rc) endif ! end loutput_fh endif if(mype==0) print *,'output_fh=',output_fh(1:size(output_fh)),'lflname_fulltime=',lflname_fulltime - +#endif if ( quilting ) then do i=1, write_groups call ESMF_InfoGetFromHost(wrtState(i), info=info, rc=rc) @@ -983,13 +1253,13 @@ subroutine InitializeAdvertise(gcomp, rc) call ESMF_ConfigDestroy(cf, rc=rc) if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return - if(write_runtimelog .and. lprint) print *,'in fv3_cap, init time=',MPI_Wtime()-timeis,mype + if(write_runtimelog .and. lprint) print *,'in ufsatm_cap, init time=',MPI_Wtime()-timeis,mype !----------------------------------------------------------------------- ! end subroutine InitializeAdvertise !----------------------------------------------------------------------------- - !> This will calculate output hours if the user has stated a + !> This will calculate output hours if the user has stated a !> an fhzero frequency. !> !> @param[inout] nfhmax maximum number of forecast hours @@ -1030,7 +1300,7 @@ subroutine OutputHours_ArrayInput(noutput_fh,output_startfh) integer :: ist, i integer, intent(inout) :: noutput_fh real, intent(inout) :: output_startfh - + if( output_startfh == 0) then ! If the output time in output_fh array contains first time stamp output, ! check the rest of output time, otherwise, check all the output time. @@ -1061,16 +1331,18 @@ subroutine OutputHours_ArrayInput(noutput_fh,output_startfh) end subroutine OutputHours_ArrayInput subroutine InitializeRealize(gcomp, rc) + use mpi_f08, only : MPI_Wtime + type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc ! 
local variables - character(len=*),parameter :: subname='(fv3atm_cap:InitializeRealize)' + character(len=*),parameter :: subname='(ufsatm_cap:InitializeRealize)' type(ESMF_Clock) :: clock type(ESMF_State) :: importState, exportState integer :: urc - real(8) :: MPI_Wtime, timeirs + real(8) :: timeirs rc = ESMF_SUCCESS timeirs = MPI_Wtime() @@ -1091,42 +1363,46 @@ subroutine InitializeRealize(gcomp, rc) timere = 0. timep2re = 0. - if(write_runtimelog .and. lprint) print *,'in fv3_cap, initirealz time=',MPI_Wtime()-timeirs,mype + if(write_runtimelog .and. lprint) print *,'in ufsatm_cap, initirealz time=',MPI_Wtime()-timeirs,mype end subroutine InitializeRealize !----------------------------------------------------------------------------- subroutine ModelAdvance(gcomp, rc) + + use mpi_f08, only : MPI_Wtime type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc - real(kind=8) :: MPI_Wtime, timers + real(kind=8) :: timers !----------------------------------------------------------------------------- rc = ESMF_SUCCESS timers = MPI_Wtime() - if(write_runtimelog .and. timere>0. .and. lprint) print *,'in fv3_cap, time between fv3 run step=', timers-timere,mype + if(write_runtimelog .and. timere>0. .and. lprint) print *,'in ufsatm_cap, time between atmosphere run step=', timers-timere,mype - if (profile_memory) call ESMF_VMLogMemInfo("Entering FV3 ModelAdvance: ") + if (profile_memory) call ESMF_VMLogMemInfo("Entering UFSATM ModelAdvance: ") call ModelAdvance_phase1(gcomp, rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - +#ifdef FV3 call ModelAdvance_phase2(gcomp, rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - if (profile_memory) call ESMF_VMLogMemInfo("Leaving FV3 ModelAdvance: ") +#endif + if (profile_memory) call ESMF_VMLogMemInfo("Leaving UFSATM ModelAdvance: ") timere = MPI_Wtime() - if(write_runtimelog .and. 
lprint) print *,'in fv3_cap, time in fv3 run step=', timere-timers, mype + if(write_runtimelog .and. lprint) print *,'in ufsatm_cap, time in atmosphere run step=', timere-timers, mype end subroutine ModelAdvance !----------------------------------------------------------------------------- subroutine ModelAdvance_phase1(gcomp, rc) + use mpi_f08, only : MPI_Wtime + type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc @@ -1134,31 +1410,31 @@ subroutine ModelAdvance_phase1(gcomp, rc) type(ESMF_Clock) :: clock integer :: urc logical :: fcstpe - character(len=*),parameter :: subname='(fv3_cap:ModelAdvance_phase1)' + character(len=*),parameter :: subname='(ufsatm_cap:ModelAdvance_phase1)' character(240) :: msgString - real(kind=8) :: MPI_Wtime, timep1rs, timep1re + real(kind=8) :: timep1rs, timep1re !----------------------------------------------------------------------------- rc = ESMF_SUCCESS timep1rs = MPI_Wtime() - if(write_runtimelog .and. timep2re>0. .and. lprint) print *,'in fv3_cap, time between fv3 run phase2 and phase1 ', timep1rs-timep2re,mype + if(write_runtimelog .and. timep2re>0. .and. 
lprint) print *,'in ufsatm_cap, time between ufsatm run phase2 and phase1 ', timep1rs-timep2re,mype - if(profile_memory) call ESMF_VMLogMemInfo("Entering FV3 ModelAdvance_phase1: ") + if(profile_memory) call ESMF_VMLogMemInfo("Entering UFSATM ModelAdvance_phase1: ") call ESMF_GridCompGet(gcomp, clock=clock, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_ClockPrint(clock, options="currTime", & - preString="entering FV3_ADVANCE phase1 with clock current: ", & + preString="entering UFSATM_ADVANCE phase1 with clock current: ", & unit=msgString) call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO) call ESMF_ClockPrint(clock, options="startTime", & - preString="entering FV3_ADVANCE phase1 with clock start: ", & + preString="entering UFSATM_ADVANCE phase1 with clock start: ", & unit=msgString) call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO) call ESMF_ClockPrint(clock, options="stopTime", & - preString="entering FV3_ADVANCE phase1 with clock stop: ", & + preString="entering UFSATM_ADVANCE phase1 with clock stop: ", & unit=msgString) call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO) @@ -1174,14 +1450,16 @@ subroutine ModelAdvance_phase1(gcomp, rc) endif timep1re = MPI_Wtime() - if(write_runtimelog .and. lprint) print *,'in fv3_cap,modeladvance phase1 time ', timep1re-timep1rs,mype - if (profile_memory) call ESMF_VMLogMemInfo("Leaving FV3 ModelAdvance_phase1: ") + if(write_runtimelog .and. 
lprint) print *,'in ufsatm_cap,modeladvance phase1 time ', timep1re-timep1rs,mype + if (profile_memory) call ESMF_VMLogMemInfo("Leaving UFSATM ModelAdvance_phase1: ") end subroutine ModelAdvance_phase1 !----------------------------------------------------------------------------- subroutine ModelAdvance_phase2(gcomp, rc) + use mpi_f08, only : MPI_Wtime + type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc @@ -1194,14 +1472,14 @@ subroutine ModelAdvance_phase2(gcomp, rc) integer :: na, j, urc integer :: nfseconds logical :: fcstpe - character(len=*),parameter :: subname='(fv3_cap:ModelAdvance_phase2)' + character(len=*),parameter :: subname='(ufsatm_cap:ModelAdvance_phase2)' character(240) :: msgString type(ESMF_Clock) :: clock, clock_out integer :: fieldCount - real(kind=8) :: MPI_Wtime, timep2rs + real(kind=8) :: timep2rs character(len=ESMF_MAXSTR) :: fb_name type(ESMF_Info) :: info @@ -1210,7 +1488,7 @@ subroutine ModelAdvance_phase2(gcomp, rc) rc = ESMF_SUCCESS timep2rs = MPI_Wtime() - if(profile_memory) call ESMF_VMLogMemInfo("Entering FV3 ModelAdvance_phase2: ") + if(profile_memory) call ESMF_VMLogMemInfo("Entering UFSATM ModelAdvance_phase2: ") call ESMF_GridCompGet(gcomp, clock=clock, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return @@ -1262,6 +1540,7 @@ subroutine ModelAdvance_phase2(gcomp, rc) if (fieldCount > 0) then call ESMF_FieldBundleSMM(fcstFB(j), wrtFB(j,n_group), & routehandle=routehandle(j, n_group), & + zeroregionflag=(/ESMF_REGION_SELECT/), & termorderflag=(/ESMF_TERMORDER_SRCSEQ/), rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return end if @@ -1311,15 +1590,15 @@ subroutine ModelAdvance_phase2(gcomp, rc) endif ! 
quilting call ESMF_ClockPrint(clock, options="currTime", & - preString="leaving FV3_ADVANCE phase2 with clock current: ", & + preString="leaving UFSATM_ADVANCE phase2 with clock current: ", & unit=msgString) call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO) call ESMF_ClockPrint(clock, options="startTime", & - preString="leaving FV3_ADVANCE phase2 with clock start: ", & + preString="leaving UFSATM_ADVANCE phase2 with clock start: ", & unit=msgString) call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO) call ESMF_ClockPrint(clock, options="stopTime", & - preString="leaving FV3_ADVANCE phase2 with clock stop: ", & + preString="leaving UFSATM_ADVANCE phase2 with clock stop: ", & unit=msgString) call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO) @@ -1334,8 +1613,8 @@ subroutine ModelAdvance_phase2(gcomp, rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return timep2re = MPI_Wtime() - if(write_runtimelog .and. lprint) print *,'in fv3_cap,modeladvance phase2 time ', timep2re-timep2rs, mype - if(profile_memory) call ESMF_VMLogMemInfo("Leaving FV3 ModelAdvance_phase2: ") + if(write_runtimelog .and. lprint) print *,'in ufsatm_cap,modeladvance phase2 time ', timep2re-timep2rs, mype + if(profile_memory) call ESMF_VMLogMemInfo("Leaving UFSATM ModelAdvance_phase2: ") end subroutine ModelAdvance_phase2 @@ -1375,7 +1654,7 @@ end subroutine ModelSetRunClock !----------------------------------------------------------------------------- - subroutine fv3_checkimport(gcomp, rc) + subroutine ufsatm_checkimport(gcomp, rc) !*** Check the import state fields @@ -1384,7 +1663,7 @@ subroutine fv3_checkimport(gcomp, rc) integer, intent(out) :: rc ! 
local variables - character(len=*),parameter :: subname='(fv3atm_cap:fv3_checkimport)' + character(len=*),parameter :: subname='(ufsatm_cap:ufsatm_checkimport)' integer :: n, nf type(ESMF_Clock) :: clock type(ESMF_Time) :: currTime, invalidTime @@ -1408,7 +1687,7 @@ subroutine fv3_checkimport(gcomp, rc) date(1:6) = 0 call ESMF_TimeGet(time=currTime,yy=date(1),mm=date(2),dd=date(3),h=date(4), & m=date(5),s=date(6),rc=rc) -! if(mype==0) print *,'in fv3_checkimport, currtime=',date(1:6) +! if(mype==0) print *,'in ufsatm_checkimport, currtime=',date(1:6) ! set up invalid time (by convention) call ESMF_TimeSet(invalidTime, yy=99999999, mm=01, dd=01, & @@ -1424,7 +1703,7 @@ subroutine fv3_checkimport(gcomp, rc) importFieldsValid(:) = .true. if (associated(fieldList)) then -! if(mype==0) print *,'in fv3_checkimport, inside associated(fieldList)' +! if(mype==0) print *,'in ufsatm_checkimport, inside associated(fieldList)' do n = 1,size(fieldList) call ESMF_FieldGet(fieldList(n), name=fldname, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return @@ -1455,14 +1734,14 @@ subroutine fv3_checkimport(gcomp, rc) return end if end if - write(msgString,'(A,2i4,l3)') "fv3_checkimport "//trim(fldname),n,nf,importFieldsValid(nf) + write(msgString,'(A,2i4,l3)') "ufsatm_checkimport "//trim(fldname),n,nf,importFieldsValid(nf) call ESMF_LogWrite(msgString,ESMF_LOGMSG_INFO,rc=rc) enddo deallocate(fieldList) endif - end subroutine fv3_checkimport + end subroutine ufsatm_checkimport !----------------------------------------------------------------------------- @@ -1473,7 +1752,7 @@ subroutine TimestampExport_phase1(gcomp, rc) integer, intent(out) :: rc ! 
local variables - character(len=*),parameter :: subname='(fv3atm_cap:TimestampExport_phase1)' + character(len=*),parameter :: subname='(ufsatm_cap:TimestampExport_phase1)' type(ESMF_Clock) :: driverClock, modelClock type(ESMF_State) :: exportState @@ -1497,16 +1776,17 @@ end subroutine TimestampExport_phase1 !----------------------------------------------------------------------------- subroutine ModelFinalize(gcomp, rc) + use mpi_f08, only : MPI_Wtime ! input arguments type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc ! local variables - character(len=*),parameter :: subname='(fv3atm_cap:ModelFinalize)' + character(len=*),parameter :: subname='(ufsatm_cap:ModelFinalize)' integer :: i, urc type(ESMF_VM) :: vm - real(kind=8) :: MPI_Wtime, timeffs + real(kind=8) :: timeffs ! !----------------------------------------------------------------------------- !*** finialize forecast @@ -1545,10 +1825,10 @@ subroutine ModelFinalize(gcomp, rc) call ESMF_GridCompDestroy(fcstComp, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! - if(write_runtimelog .and. lprint) print *,'in fv3_cap, finalize time=',MPI_Wtime()-timeffs, mype + if(write_runtimelog .and. lprint) print *,'in ufsatm_cap, finalize time=',MPI_Wtime()-timeffs, mype end subroutine ModelFinalize ! !----------------------------------------------------------------------------- -end module fv3atm_cap_mod +end module ufsatm_cap_mod diff --git a/ufsatm_util.F90 b/ufsatm_util.F90 new file mode 100644 index 0000000000..e532b10714 --- /dev/null +++ b/ufsatm_util.F90 @@ -0,0 +1,121 @@ +! ########################################################################################### +!> \file ufsatm_util.F90 +!> +!> This module contains code that could be shared across dynamical core atmospheric drivers. +!> +! 
########################################################################################### +module mod_ufsatm_util + implicit none + public :: get_atmos_tracer_types +contains + ! ######################################################################################### + ! + ! + ! Identify and return usage and type id of atmospheric tracers. + ! Ids are defined as: + ! 0 = generic tracer + ! 1 = chemistry - prognostic + ! 2 = chemistry - diagnostic + ! + ! Tracers are identified via the additional 'tracer_usage' keyword and + ! their optional 'type' qualifier. A tracer is assumed prognostic if + ! 'type' is not provided. See examples from the field_table file below: + ! + ! Prognostic tracer: + ! ------------------ + ! "TRACER", "atmos_mod", "so2" + ! "longname", "so2 mixing ratio" + ! "units", "ppm" + ! "tracer_usage", "chemistry" + ! "profile_type", "fixed", "surface_value=5.e-6" / + ! + ! Diagnostic tracer: + ! ------------------ + ! "TRACER", "atmos_mod", "pm25" + ! "longname", "PM2.5" + ! "units", "ug/m3" + ! "tracer_usage", "chemistry", "type=diagnostic" + ! "profile_type", "fixed", "surface_value=5.e-6" / + ! + ! For atmospheric chemistry, the order of both prognostic and diagnostic + ! tracers is validated against the model's internal assumptions. + ! + ! + ! 
######################################################################################### + subroutine get_atmos_tracer_types(tracer_types) + + use field_manager_mod, only: parse + use tracer_manager_mod, only: query_method + use field_manager_mod, only: MODEL_ATMOS + use mpp_mod, only: mpp_error, FATAL + use tracer_manager_mod, only: get_number_tracers + + integer, intent(out) :: tracer_types(:) + + !--- local variables + logical :: found + integer :: n, num_tracers, num_types + integer :: id_max, id_min, id_num, ip_max, ip_min, ip_num + character(len=32) :: tracer_usage + character(len=128) :: control, tracer_type + + !--- begin + + !--- validate array size + call get_number_tracers(MODEL_ATMOS, num_tracers=num_tracers) + + if (size(tracer_types) < num_tracers) & + call mpp_error(FATAL, 'insufficient size of tracer type array') + + !--- initialize tracer indices + id_min = num_tracers + 1 + id_max = -id_min + ip_min = id_min + ip_max = id_max + id_num = 0 + ip_num = 0 + + do n = 1, num_tracers + tracer_types(n) = 0 + found = query_method('tracer_usage',MODEL_ATMOS,n,tracer_usage,control) + if (found) then + if (trim(tracer_usage) == 'chemistry') then + !--- set default to prognostic + tracer_type = 'prognostic' + num_types = parse(control, 'type', tracer_type) + select case (trim(tracer_type)) + case ('diagnostic') + tracer_types(n) = 2 + id_num = id_num + 1 + id_max = n + if (id_num == 1) id_min = n + case ('prognostic') + tracer_types(n) = 1 + ip_num = ip_num + 1 + ip_max = n + if (ip_num == 1) ip_min = n + end select + end if + end if + end do + + if (ip_num > 0) then + !--- check if prognostic tracers are contiguous + if (ip_num > ip_max - ip_min + 1) & + call mpp_error(FATAL, 'prognostic chemistry tracers must be contiguous') + end if + + if (id_num > 0) then + !--- check if diagnostic tracers are contiguous + if (id_num > id_max - id_min + 1) & + call mpp_error(FATAL, 'diagnostic chemistry tracers must be contiguous') + end if + + !--- prognostic tracers 
must precede diagnostic ones + if (ip_max > id_min) & + call mpp_error(FATAL, 'diagnostic chemistry tracers must follow prognostic ones') + + end subroutine get_atmos_tracer_types + ! + +end module mod_ufsatm_util diff --git a/upp b/upp index 8f6caa9d04..1a16f94d20 160000 --- a/upp +++ b/upp @@ -1 +1 @@ -Subproject commit 8f6caa9d04f1caf2ca8ce1b362aedcebf9b14c8c +Subproject commit 1a16f94d20898ff2ffcc3c8b6e036a9a16ed19a4