diff --git a/CMakeLists.txt b/CMakeLists.txt
index ec9721ba6..718ba11b4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -43,7 +43,6 @@ add_library(fv3atm
cpl/module_cap_cpl.F90
io/FV3GFS_io.F90
io/module_write_netcdf.F90
- io/module_write_netcdf_parallel.F90
io/module_fv3_io_def.F90
io/module_write_internal_state.F90
io/module_wrt_grid_comp.F90
diff --git a/atmos_cubed_sphere b/atmos_cubed_sphere
index fa86482e4..7ce7aa94b 160000
--- a/atmos_cubed_sphere
+++ b/atmos_cubed_sphere
@@ -1 +1 @@
-Subproject commit fa86482e48f1d5b594acb369e68b8488de84dc66
+Subproject commit 7ce7aa94b33b5f3cb351867df50a2ad624bb405f
diff --git a/atmos_model.F90 b/atmos_model.F90
index 3ac2555e9..291c2bf69 100644
--- a/atmos_model.F90
+++ b/atmos_model.F90
@@ -74,7 +74,7 @@ module atmos_model_mod
use atmosphere_mod, only: atmosphere_scalar_field_halo
use atmosphere_mod, only: atmosphere_get_bottom_layer
use atmosphere_mod, only: set_atmosphere_pelist
-use atmosphere_mod, only: Atm, mygrid
+use atmosphere_mod, only: Atm, mygrid, get_nth_domain_info
use block_control_mod, only: block_control_type, define_blocks_packed
use DYCORE_typedefs, only: DYCORE_data_type, DYCORE_diag_type
@@ -113,6 +113,7 @@ module atmos_model_mod
public atmos_model_exchange_phase_1, atmos_model_exchange_phase_2
public atmos_model_restart
public get_atmos_model_ungridded_dim
+public atmos_model_get_nth_domain_info
public addLsmask2grid
public setup_exportdata
!-----------------------------------------------------------------------
@@ -125,6 +126,8 @@ module atmos_model_mod
integer :: layout(2) ! compute task layout
logical :: regional ! true if domain is regional
logical :: nested ! true if there is a nest
+ integer :: ngrids ! total number of atmosphere grids (parent plus nests)
+ integer :: mygrid ! index of the grid handled by this task
integer :: mlon, mlat
integer :: iau_offset ! iau running window length
logical :: pe ! current pe.
@@ -165,7 +168,6 @@ module atmos_model_mod
! DYCORE containers
!-------------------
type(DYCORE_data_type), allocatable :: DYCORE_Data(:) ! number of blocks
-type(DYCORE_diag_type) :: DYCORE_Diag(25)
!----------------
! GFS containers
@@ -262,7 +264,7 @@ subroutine update_atmos_radiation_physics (Atmos)
if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP timestep_init step failed')
if (GFS_Control%do_sppt .or. GFS_Control%do_shum .or. GFS_Control%do_skeb .or. &
- GFS_Control%lndp_type > 0 .or. GFS_Control%do_ca ) then
+ GFS_Control%lndp_type > 0 .or. GFS_Control%do_ca .or. GFS_Control%do_spp) then
!--- call stochastic physics pattern generation / cellular automata
call stochastic_physics_wrapper(GFS_control, GFS_data, Atm_block, ierr)
if (ierr/=0) call mpp_error(FATAL, 'Call to stochastic_physics_wrapper failed')
@@ -380,7 +382,7 @@ subroutine update_atmos_radiation_physics (Atmos)
if(GFS_control%print_diff_pgr) then
call atmos_timestep_diagnostics(Atmos)
endif
-
+
! Update flag for first time step of time integration
GFS_control%first_time_step = .false.
@@ -444,7 +446,7 @@ subroutine atmos_timestep_diagnostics(Atmos)
enddo
pcount = pcount+count
enddo
-
+
! Sum pgr stats from psum/pcount and convert to hPa/hour global avg:
sendbuf(1:2) = (/ psum, pcount /)
call MPI_Allreduce(sendbuf,recvbuf,2,MPI_DOUBLE_PRECISION,MPI_SUM,GFS_Control%communicator,ierror)
@@ -454,7 +456,7 @@ subroutine atmos_timestep_diagnostics(Atmos)
sendbuf(1:2) = (/ maxabs, dble(GFS_Control%me) /)
call MPI_Allreduce(sendbuf,recvbuf,1,MPI_2DOUBLE_PRECISION,MPI_MAXLOC,GFS_Control%communicator,ierror)
call MPI_Bcast(pmaxloc,size(pmaxloc),MPI_DOUBLE_PRECISION,nint(recvbuf(2)),GFS_Control%communicator,ierror)
-
+
if(GFS_Control%me == GFS_Control%master) then
2933 format('At forecast hour ',F9.3,' mean abs pgr change is ',F16.8,' hPa/hr')
2934 format(' max abs change ',F15.10,' bar at tile=',I0,' i=',I0,' j=',I0)
@@ -491,23 +493,17 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step)
type (atmos_data_type), intent(inout) :: Atmos
type (time_type), intent(in) :: Time_init, Time, Time_step
!--- local variables ---
- integer :: unit, ntdiag, ntfamily, i, j, k
- integer :: mlon, mlat, nlon, nlat, nlev, sec, dt
+ integer :: unit, i
+ integer :: mlon, mlat, nlon, nlat, nlev, sec
integer :: ierr, io, logunit
- integer :: idx, tile_num
+ integer :: tile_num
integer :: isc, iec, jsc, jec
- integer :: isd, ied, jsd, jed
- integer :: blk, ibs, ibe, jbs, jbe
real(kind=GFS_kind_phys) :: dt_phys
- real, allocatable :: q(:,:,:,:), p_half(:,:,:)
- character(len=80) :: control
- character(len=64) :: filename, filename2, pelist_name
- character(len=132) :: text
- logical :: p_hydro, hydro, fexist
+ logical :: p_hydro, hydro
logical, save :: block_message = .true.
type(GFS_init_type) :: Init_parm
integer :: bdat(8), cdat(8)
- integer :: ntracers, maxhf, maxh
+ integer :: ntracers
character(len=32), allocatable, target :: tracer_names(:)
integer, allocatable, target :: tracer_types(:)
integer :: nthrds, nb
@@ -533,7 +529,7 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step)
call atmosphere_resolution (nlon, nlat, global=.false.)
call atmosphere_resolution (mlon, mlat, global=.true.)
call alloc_atmos_data_type (nlon, nlat, Atmos)
- call atmosphere_domain (Atmos%domain, Atmos%layout, Atmos%regional, Atmos%nested, Atmos%pelist)
+ call atmosphere_domain (Atmos%domain, Atmos%layout, Atmos%regional, Atmos%nested, Atmos%ngrids, Atmos%mygrid, Atmos%pelist)
call atmosphere_diag_axes (Atmos%axes)
call atmosphere_etalvls (Atmos%ak, Atmos%bk, flip=flip_vc)
call atmosphere_grid_bdry (Atmos%lon_bnd, Atmos%lat_bnd, global=.false.)
@@ -547,7 +543,7 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step)
!----------------------------------------------------------------------------------------------
! initialize atmospheric model - must happen AFTER atmosphere_init so that nests work correctly
- IF ( file_exists('input.nml')) THEN
+ if (file_exists('input.nml')) then
read(input_nml_file, nml=atmos_model_nml, iostat=io)
ierr = check_nml_error(io, 'atmos_model_nml')
endif
@@ -635,19 +631,10 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step)
Init_parm%restart = Atm(mygrid)%flagstruct%warm_start
Init_parm%hydrostatic = Atm(mygrid)%flagstruct%hydrostatic
-#ifdef INTERNAL_FILE_NML
! allocate required to work around GNU compiler bug 100886 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100886
allocate(Init_parm%input_nml_file, mold=input_nml_file)
Init_parm%input_nml_file => input_nml_file
Init_parm%fn_nml='using internal file'
-#else
- pelist_name=mpp_get_current_pelist_name()
- Init_parm%fn_nml='input_'//trim(pelist_name)//'.nml'
- inquire(FILE=Init_parm%fn_nml, EXIST=fexist)
- if (.not. fexist ) then
- Init_parm%fn_nml='input.nml'
- endif
-#endif
call GFS_initialize (GFS_control, GFS_data%Statein, GFS_data%Stateout, GFS_data%Sfcprop, &
GFS_data%Coupling, GFS_data%Grid, GFS_data%Tbd, GFS_data%Cldprop, GFS_data%Radtend, &
@@ -711,7 +698,7 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step)
if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP physics_init step failed')
if (GFS_Control%do_sppt .or. GFS_Control%do_shum .or. GFS_Control%do_skeb .or. &
- GFS_Control%lndp_type > 0 .or. GFS_Control%do_ca) then
+ GFS_Control%lndp_type > 0 .or. GFS_Control%do_ca .or. GFS_Control%do_spp) then
!--- Initialize stochastic physics pattern generation / cellular automata for first time step
call stochastic_physics_wrapper(GFS_control, GFS_data, Atm_block, ierr)
@@ -964,7 +951,7 @@ subroutine atmos_model_end (Atmos)
use update_ca, only: write_ca_restart
type (atmos_data_type), intent(inout) :: Atmos
!---local variables
- integer :: idx, seconds, ierr
+ integer :: ierr
!-----------------------------------------------------------------------
!---- termination routine for atmospheric model ----
@@ -977,7 +964,7 @@ subroutine atmos_model_end (Atmos)
! call write_stoch_restart_atm('RESTART/atm_stoch.res.nc')
endif
if (GFS_Control%do_sppt .or. GFS_Control%do_shum .or. GFS_Control%do_skeb .or. &
- GFS_Control%lndp_type > 0 .or. GFS_Control%do_ca ) then
+ GFS_Control%lndp_type > 0 .or. GFS_Control%do_ca .or. GFS_Control%do_spp) then
if(restart_endfcst) then
call write_stoch_restart_atm('RESTART/atm_stoch.res.nc')
if (GFS_control%do_ca)then
@@ -993,6 +980,8 @@ subroutine atmos_model_end (Atmos)
call CCPP_step (step="finalize", nblks=Atm_block%nblks, ierr=ierr)
if (ierr/=0) call mpp_error(FATAL, 'Call to CCPP finalize step failed')
+ call dealloc_atmos_data_type (Atmos)
+
end subroutine atmos_model_end
!
@@ -1541,53 +1530,6 @@ subroutine update_atmos_chemistry(state, rc)
end select
end subroutine update_atmos_chemistry
-!
-
-!#######################################################################
-!
-!
-!
-! Print checksums of the various fields in the atmos_data_type.
-!
-
-!
-! Routine to print checksums of the various fields in the atmos_data_type.
-!
-
-!
-! call atmos_data_type_chksum(id, timestep, atm)
-!
-
-!
-! Derived-type variable that contains fields in the atmos_data_type.
-!
-!
-!
-! Label to differentiate where this routine in being called from.
-!
-!
-!
-! An integer to indicate which timestep this routine is being called for.
-!
-!
-subroutine atmos_data_type_chksum(id, timestep, atm)
-type(atmos_data_type), intent(in) :: atm
- character(len=*), intent(in) :: id
- integer , intent(in) :: timestep
- integer :: n, outunit
-
-100 format("CHECKSUM::",A32," = ",Z20)
-101 format("CHECKSUM::",A16,a,'%',a," = ",Z20)
-
- outunit = stdout()
- write(outunit,*) 'BEGIN CHECKSUM(Atmos_data_type):: ', id, timestep
- write(outunit,100) ' atm%lon_bnd ', mpp_chksum(atm%lon_bnd)
- write(outunit,100) ' atm%lat_bnd ', mpp_chksum(atm%lat_bnd)
- write(outunit,100) ' atm%lon ', mpp_chksum(atm%lon)
- write(outunit,100) ' atm%lat ', mpp_chksum(atm%lat)
-
-end subroutine atmos_data_type_chksum
-
!
subroutine alloc_atmos_data_type (nlon, nlat, Atmos)
@@ -1623,7 +1565,6 @@ subroutine assign_importdata(jdat, rc)
integer :: sphum, liq_wat, ice_wat, o3mr
character(len=128) :: impfield_name, fldname
type(ESMF_TypeKind_Flag) :: datatype
- real(kind=ESMF_KIND_R4), dimension(:,:), pointer :: datar42d
real(kind=ESMF_KIND_R8), dimension(:,:), pointer :: datar82d
real(kind=ESMF_KIND_R8), dimension(:,:,:), pointer:: datar83d
real(kind=GFS_kind_phys), dimension(:,:), pointer :: datar8
@@ -1634,7 +1575,7 @@ subroutine assign_importdata(jdat, rc)
type(ESMF_Grid) :: grid
type(ESMF_Field) :: dbgField
character(19) :: currtimestring
- real (kind=GFS_kind_phys), parameter :: z0ice=1.1 ! (in cm)
+ real (kind=GFS_kind_phys), parameter :: z0ice=1.0 ! (in cm)
!
! real(kind=GFS_kind_phys), parameter :: himax = 8.0 !< maximum ice thickness allowed
@@ -1690,10 +1631,6 @@ subroutine assign_importdata(jdat, rc)
if (mpp_pe() == mpp_root_pe() .and. debug) print *,'in cplIMP,atmos gets ',trim(impfield_name),' datar8=', &
datar8(isc,jsc), maxval(datar8), minval(datar8)
found = .true.
-! gfs physics runs with r8
-! else
-! call ESMF_FieldGet(importFields(n),farrayPtr=datar42d,localDE=0, rc=rc)
-! datar8 = datar42d
endif
else if( dimCount == 3) then
@@ -2489,7 +2426,7 @@ subroutine assign_importdata(jdat, rc)
if (GFS_data(nb)%Sfcprop%fice(ix) >= GFS_control%min_seaice) then
GFS_data(nb)%Coupling%hsnoin_cpl(ix) = min(hsmax, GFS_data(nb)%Coupling%hsnoin_cpl(ix) &
- / (GFS_data(nb)%Sfcprop%fice(ix)*GFS_data(nb)%Sfcprop%oceanfrac(ix)))
+ / GFS_data(nb)%Sfcprop%fice(ix))
GFS_data(nb)%Sfcprop%zorli(ix) = z0ice
tem = GFS_data(nb)%Sfcprop%tisfc(ix) * GFS_data(nb)%Sfcprop%tisfc(ix)
tem = con_sbc * tem * tem
@@ -2546,7 +2483,6 @@ subroutine assign_importdata(jdat, rc)
rc=0
!
- if (mpp_pe() == mpp_root_pe()) print *,'end of assign_importdata'
end subroutine assign_importdata
!
@@ -2560,9 +2496,9 @@ subroutine setup_exportdata(rc)
integer, optional, intent(out) :: rc
!--- local variables
- integer :: i, j, k, idx, ix
+ integer :: i, j, ix
integer :: isc, iec, jsc, jec
- integer :: ib, jb, nb, nsb, nk
+ integer :: nb, nk
integer :: sphum, liq_wat, ice_wat, o3mr
real(GFS_kind_phys) :: rtime, rtimek
@@ -2586,7 +2522,6 @@ subroutine setup_exportdata(rc)
jsc = Atm_block%jsc
jec = Atm_block%jec
nk = Atm_block%npz
- nsb = Atm_block%blkno(isc,jsc)
rtime = one / GFS_control%dtp
rtimek = GFS_control%rho_h2o * rtime
@@ -2895,7 +2830,6 @@ subroutine addLsmask2grid(fcstGrid, rc)
integer isc, iec, jsc, jec
integer i, j, nb, ix
! integer CLbnd(2), CUbnd(2), CCount(2), TLbnd(2), TUbnd(2), TCount(2)
- type(ESMF_StaggerLoc) :: staggerloc
integer, allocatable :: lsmask(:,:)
integer(kind=ESMF_KIND_I4), pointer :: maskPtr(:,:)
!
@@ -2947,5 +2881,14 @@ subroutine addLsmask2grid(fcstGrid, rc)
end subroutine addLsmask2grid
!------------------------------------------------------------------------------
+ subroutine atmos_model_get_nth_domain_info(n, layout, nx, ny, pelist)
+ integer, intent(in) :: n
+ integer, intent(out) :: layout(2)
+ integer, intent(out) :: nx, ny
+ integer, pointer, intent(out) :: pelist(:)
+
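+ ! thin pass-through to atmosphere_mod's get_nth_domain_info: reports layout, grid extents and pelist of grid n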
+ call get_nth_domain_info(n, layout, nx, ny, pelist)
+
+ end subroutine atmos_model_get_nth_domain_info
end module atmos_model_mod
diff --git a/ccpp/data/CCPP_typedefs.F90 b/ccpp/data/CCPP_typedefs.F90
index 01695bc4a..1e2171838 100644
--- a/ccpp/data/CCPP_typedefs.F90
+++ b/ccpp/data/CCPP_typedefs.F90
@@ -141,8 +141,8 @@ subroutine interstitial_create (Interstitial, is, ie, isd, ied, js, je, jsd, jed
! For multi-gases physics
integer, intent(in) :: nwat
integer, intent(in), optional :: ngas
- real(kind_dyn), intent(in), optional :: rilist(:)
- real(kind_dyn), intent(in), optional :: cpilist(:)
+ real(kind_dyn), intent(in), optional :: rilist(0:)
+ real(kind_dyn), intent(in), optional :: cpilist(0:)
integer, intent(in) :: mpirank
integer, intent(in) :: mpiroot
!
diff --git a/ccpp/data/GFS_typedefs.F90 b/ccpp/data/GFS_typedefs.F90
index 5190b7bc0..1dd3a1cc3 100644
--- a/ccpp/data/GFS_typedefs.F90
+++ b/ccpp/data/GFS_typedefs.F90
@@ -48,6 +48,10 @@ module GFS_typedefs
integer, parameter :: naux2dmax = 20 !< maximum number of auxiliary 2d arrays in output (for debugging)
integer, parameter :: naux3dmax = 20 !< maximum number of auxiliary 3d arrays in output (for debugging)
+ integer, parameter :: dfi_radar_max_intervals = 4 !< Number of radar-derived temperature tendency and/or convection suppression intervals. Do not change.
+
+ real(kind=kind_phys), parameter :: limit_unspecified = 1e12 !< special constant for "namelist value was not provided" in radar-derived temperature tendency limit range
+
!> \section arg_table_GFS_typedefs
!! \htmlinclude GFS_typedefs.html
!!
@@ -537,6 +541,11 @@ module GFS_typedefs
real (kind=kind_phys), pointer :: skebu_wts (:,:) => null() !
real (kind=kind_phys), pointer :: skebv_wts (:,:) => null() !
real (kind=kind_phys), pointer :: sfc_wts (:,:) => null() ! mg, sfc-perts
+ real (kind=kind_phys), pointer :: spp_wts_pbl (:,:) => null() ! spp-pbl-perts
+ real (kind=kind_phys), pointer :: spp_wts_sfc (:,:) => null() ! spp-sfc-perts
+ real (kind=kind_phys), pointer :: spp_wts_mp (:,:) => null() ! spp-mp-perts
+ real (kind=kind_phys), pointer :: spp_wts_gwd (:,:) => null() ! spp-gwd-perts
+ real (kind=kind_phys), pointer :: spp_wts_rad (:,:) => null() ! spp-rad-perts
!--- aerosol surface emissions for Thompson microphysics
real (kind=kind_phys), pointer :: nwfa2d (:) => null() !< instantaneous water-friendly sfc aerosol source
@@ -802,6 +811,15 @@ module GFS_typedefs
real(kind=kind_phys) :: tcr
real(kind=kind_phys) :: tcrf
!
+ integer :: num_dfi_radar !< number of timespans with radar-prescribed temperature tendencies
+ real (kind=kind_phys) :: fh_dfi_radar(1+dfi_radar_max_intervals) !< begin+end of timespans to receive radar-prescribed temperature tendencies
+ logical :: do_cap_suppress !< enable convection suppression in GF scheme if fh_dfi_radar is specified
+ real (kind=kind_phys) :: radar_tten_limits(2) !< radar_tten values outside this range (min,max) are discarded
+ integer :: ix_dfi_radar(dfi_radar_max_intervals) = -1 !< Index within dfi_radar_tten of each timespan (-1 means "none")
+ integer :: dfi_radar_max_intervals !< maximum number of radar-prescribed tendency intervals
+ integer :: dfi_radar_max_intervals_plus_one !< dfi_radar_max_intervals plus one
+
+ !
logical :: effr_in !< e.g. to turn on effective radii for MG
logical :: microp_uniform
logical :: do_cldliq
@@ -1160,6 +1178,16 @@ module GFS_typedefs
! multiple patterns. It wasn't fully coded (and wouldn't have worked
! with nlndp>1, so I just dropped it). If we want to code it properly,
! we'd need to make this dim(6,5).
+ logical :: do_spp ! Overall flag to turn on SPP or not
+ integer :: spp_pbl
+ integer :: spp_sfc
+ integer :: spp_mp
+ integer :: spp_rad
+ integer :: spp_gwd
+ integer :: n_var_spp
+ character(len=3) , pointer :: spp_var_list(:) ! dimension here must match n_var_spp in stochy_nml_def
+ real(kind=kind_phys), pointer :: spp_prt_list(:) ! dimension here must match n_var_spp in stochy_nml_def
+
!--- tracer handling
character(len=32), pointer :: tracer_names(:) !< array of initialized tracers from dynamic core
integer :: ntrac !< number of tracers
@@ -1196,6 +1224,7 @@ module GFS_typedefs
integer :: index_of_process_conv_trans !< tracer changes caused by convective transport
integer :: index_of_process_physics !< tracer changes caused by physics schemes
integer :: index_of_process_non_physics !< tracer changes caused by everything except physics schemes
+ integer :: index_of_process_dfi_radar !< tracer changes caused by radar mp temperature tendency forcing
integer :: index_of_process_photochem !< all changes to ozone
logical, pointer :: is_photochem(:) => null()!< flags for which processes should be summed as photochemical
@@ -1278,6 +1307,7 @@ module GFS_typedefs
integer :: kdt !< current forecast iteration
logical :: first_time_step !< flag signaling first time step for time integration routine
logical :: restart !< flag whether this is a coldstart (.false.) or a warmstart/restart (.true.)
+ logical :: lsm_cold_start !< flag to signal a cold start of the LSM (.not. restart)
logical :: hydrostatic !< flag whether this is a hydrostatic or non-hydrostatic run
integer :: jdat(1:8) !< current forecast date and time
!< (yr, mon, day, t-zone, hr, min, sec, mil-sec)
@@ -1454,6 +1484,10 @@ module GFS_typedefs
real (kind=kind_phys), pointer :: phy_myj_a1t(:) => null() !
real (kind=kind_phys), pointer :: phy_myj_a1q(:) => null() !
+ !--- DFI Radar
+ real (kind=kind_phys), pointer :: dfi_radar_tten(:,:,:) => null() !< radar-prescribed microphysics temperature tendencies (K s-1)
+ real (kind=kind_phys), pointer :: cap_suppress(:,:) => null() !< radar-derived convection suppression
+
contains
procedure :: create => tbd_create !< allocate array data
end type GFS_tbd_type
@@ -1655,8 +1689,7 @@ module GFS_typedefs
real (kind=kind_phys), pointer :: tdomzr (:) => null() !< dominant accumulated freezing rain type
real (kind=kind_phys), pointer :: tdomip (:) => null() !< dominant accumulated sleet type
real (kind=kind_phys), pointer :: tdoms (:) => null() !< dominant accumulated snow type
-
- real (kind=kind_phys), pointer :: zmtnblck(:) => null() !< mountain blocking evaluation
+ else if (levr > levs) then
+ write(0,*) "Logic error, number of radiation levels (levr) cannot exceed number of model levels (levs)"
+ stop
else
Model%levr = levr
endif
@@ -4408,12 +4481,23 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%lndp_type = lndp_type
Model%n_var_lndp = n_var_lndp
Model%lndp_each_step = lndp_each_step
+ Model%do_spp = do_spp
+ Model%n_var_spp = n_var_spp
+
if (Model%lndp_type/=0) then
allocate(Model%lndp_var_list(Model%n_var_lndp))
allocate(Model%lndp_prt_list(Model%n_var_lndp))
Model%lndp_var_list(:) = ''
Model%lndp_prt_list(:) = clear_val
end if
+
+ if (Model%do_spp) then
+ allocate(Model%spp_var_list(Model%n_var_spp))
+ allocate(Model%spp_prt_list(Model%n_var_spp))
+ Model%spp_var_list(:) = ''
+ Model%spp_prt_list(:) = clear_val
+ end if
+
!--- cellular automata options
! force namelist consistency
allocate(Model%vfact_ca(levs))
@@ -4512,17 +4596,18 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%index_of_process_rayleigh_damping = 12
Model%index_of_process_nonorographic_gwd = 13
Model%index_of_process_conv_trans = 14
+ Model%index_of_process_dfi_radar = 15
! Number of processes to sum (last index of prior set)
- Model%nprocess_summed = 14
+ Model%nprocess_summed = Model%index_of_process_dfi_radar
! Sums of other processes, which must be after nprocess_summed:
- Model%index_of_process_physics = 15
- Model%index_of_process_non_physics = 16
- Model%index_of_process_photochem = 17
+ Model%index_of_process_physics = Model%nprocess_summed+1
+ Model%index_of_process_non_physics = Model%nprocess_summed+2
+ Model%index_of_process_photochem = Model%nprocess_summed+3
! Total number of processes (last index of prior set)
- Model%nprocess = 17
+ Model%nprocess = Model%index_of_process_photochem
! List which processes should be summed as photochemical:
allocate(Model%is_photochem(Model%nprocess))
@@ -4637,6 +4722,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
call label_dtend_cause(Model,Model%index_of_process_ozmix,'o3mix','tendency due to ozone mixing ratio')
call label_dtend_cause(Model,Model%index_of_process_temp,'temp','tendency due to temperature')
call label_dtend_cause(Model,Model%index_of_process_overhead_ozone,'o3column','tendency due to overhead ozone column')
+ call label_dtend_cause(Model,Model%index_of_process_dfi_radar,'dfi_radar','tendency due to dfi radar mp temperature forcing')
call label_dtend_cause(Model,Model%index_of_process_photochem,'photochem','tendency due to photochemical processes')
call label_dtend_cause(Model,Model%index_of_process_physics,'phys','tendency due to physics')
call label_dtend_cause(Model,Model%index_of_process_non_physics,'nophys','tendency due to non-physics processes', &
@@ -4654,6 +4740,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
call fill_dtidx(Model,dtend_select,Model%index_of_temperature,Model%index_of_process_dcnv,have_dcnv)
call fill_dtidx(Model,dtend_select,Model%index_of_temperature,Model%index_of_process_scnv,have_scnv)
call fill_dtidx(Model,dtend_select,Model%index_of_temperature,Model%index_of_process_mp,have_mp)
+ call fill_dtidx(Model,dtend_select,Model%index_of_temperature,Model%index_of_process_dfi_radar,have_mp .and. Model%num_dfi_radar>0)
call fill_dtidx(Model,dtend_select,Model%index_of_temperature,Model%index_of_process_orographic_gwd)
call fill_dtidx(Model,dtend_select,Model%index_of_temperature,Model%index_of_process_rayleigh_damping,have_rdamp)
call fill_dtidx(Model,dtend_select,Model%index_of_temperature,Model%index_of_process_nonorographic_gwd)
@@ -4787,14 +4874,15 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%kdt = nint(Model%fhour*con_hr/Model%dtp)
Model%first_time_step = .true.
Model%restart = restart
+ Model%lsm_cold_start = .not. restart
Model%hydrostatic = hydrostatic
Model%jdat(1:8) = jdat(1:8)
- allocate(Model%si(Model%levr+1))
+ allocate(Model%si(Model%levs+1))
!--- Define sigma level for radiation initialization
!--- The formula converting hybrid sigma pressure coefficients to sigma coefficients follows Eckermann (2009, MWR)
!--- ps is replaced with p0. The value of p0 uses that in http://www.emc.ncep.noaa.gov/officenotes/newernotes/on461.pdf
!--- ak/bk have been flipped from their original FV3 orientation and are defined sfc -> toa
- Model%si = (ak + bk * con_p0 - ak(Model%levr+1)) / (con_p0 - ak(Model%levr+1))
+ Model%si(1:Model%levs+1) = (ak(1:Model%levs+1) + bk(1:Model%levs+1) * con_p0 - ak(Model%levs+1)) / (con_p0 - ak(Model%levs+1))
Model%sec = 0
Model%yearlen = 365
Model%julian = -9999.
@@ -5069,7 +5157,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%num_p3d = 4
Model%num_p2d = 3
Model%shcnvcw = .false.
-! Model%ncnd = 1 ! ncnd is the number of cloud condensate types
Model%nT2delt = 1
Model%nqv2delt = 2
Model%nTdelt = 3
@@ -5086,7 +5173,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%npdf3d = 3
Model%num_p3d = 4
Model%num_p2d = 3
-! Model%ncnd = 1
if (Model%me == Model%master) print *,'Using Zhao/Carr/Sundqvist Microphysics with PDF Cloud'
else if (Model%imp_physics == Model%imp_physics_fer_hires) then ! Ferrier-Aligo scheme
@@ -5095,8 +5181,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%num_p2d = 1
Model%pdfcld = .false.
Model%shcnvcw = .false.
- ! DH* REALLY ?
-! Model%ncnd = 3 !???????? need to clarify this - Moorthi
Model%nleffr = 1
Model%nieffr = 2
Model%nseffr = 3
@@ -5116,7 +5200,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
!Model%num_p2d = 1
!Model%pdfcld = .false.
!Model%shcnvcw = .false.
-! !Model%ncnd = 5
!Model%nleffr = 1
!Model%nieffr = 2
!Model%nseffr = 3
@@ -5128,7 +5211,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%num_p2d = 1
Model%pdfcld = .false.
Model%shcnvcw = .false.
-! Model%ncnd = 5
Model%nleffr = 1
Model%nieffr = 2
Model%nseffr = 3
@@ -5159,7 +5241,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%num_p2d = 1
Model%pdfcld = .false.
Model%shcnvcw = .false.
-! Model%ncnd = 2
Model%nleffr = 2
Model%nieffr = 3
Model%nreffr = 4
@@ -5172,16 +5253,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
print *,' Morrison-Gettelman MP requires nwat to be set to 6 - job aborted'
stop
end if
-! if (abs(Model%fprcp) == 1) then
-! Model%ncnd = 4
-! elseif (Model%fprcp >= 2) then
-! Model%ncnd = 4
-! if (Model%mg_do_graupel .or. Model%mg_do_hail) then
-! Model%ncnd = 5
-! endif
-! Model%num_p3d = 6
-! Model%ngeffr = 6
-! endif
if (Model%me == Model%master) &
print *,' Using Morrison-Gettelman double moment microphysics', &
' iaerclm=', Model%iaerclm, ' iccn=', Model%iccn, &
@@ -5218,7 +5289,6 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
Model%num_p2d = 1
Model%pdfcld = .false.
Model%shcnvcw = .false.
-! Model%ncnd = 5
if (nwat /= 6) then
print *,' GFDL MP requires nwat to be set to 6 - job aborted'
stop
@@ -5232,7 +5302,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
endif
if(Model%ras .or. Model%cscnv) Model%cnvcld = .false.
- if(Model%do_shoc .or. Model%pdfcld .or. Model%do_mynnedmf) Model%cnvcld = .false.
+ if(Model%do_shoc .or. Model%pdfcld .or. Model%do_mynnedmf .or. Model%imfdeepcnv == Model%imfdeepcnv_gf) Model%cnvcld = .false.
if(Model%cnvcld) Model%ncnvcld3d = 1
!--- get cnvwind index in phy_f2d; last entry in phy_f2d array
@@ -5274,7 +5344,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
endif
if (me == Model%master) &
- write(0,*) ' num_p3d=', Model%num_p3d, ' num_p2d=', Model%num_p2d, &
+ write(*,*) ' num_p3d=', Model%num_p3d, ' num_p2d=', Model%num_p2d, &
' crtrh=', Model%crtrh, ' npdf3d=', Model%npdf3d, &
' pdfcld=', Model%pdfcld, ' shcnvcw=', Model%shcnvcw, &
' cnvcld=', Model%cnvcld, ' ncnvcld3d=',Model%ncnvcld3d, &
@@ -5318,6 +5388,68 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, &
end subroutine control_initialize
+ subroutine control_initialize_radar_tten(Model, radar_tten_limits)
+ implicit none
+
+ ! Helper subroutine for initializing variables for radar-derived
+ ! temperature tendency or convection suppression.
+
+ class(GFS_control_type) :: Model
+ real(kind_phys) :: radar_tten_limits(2)
+ integer :: i
+
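+ ! Consecutive entries of fh_dfi_radar bound the tendency intervals, so interval i is
+ ! active only when both fh_dfi_radar(i) and fh_dfi_radar(i+1) are set (values at or
+ ! below -1e10 are treated as "not provided"). Hypothetical namelist example giving
+ ! two intervals, 0-0.25 h and 0.25-0.5 h (assumed to be read from gfs_physics_nml):
+ !   fh_dfi_radar = 0.0, 0.25, 0.5
+ !   radar_tten_limits = -10.0, 10.0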
+ Model%num_dfi_radar = 0
+ do i=1,dfi_radar_max_intervals
+ if(Model%fh_dfi_radar(i)>-1e10 .and. Model%fh_dfi_radar(i+1)>-1e10) then
+ Model%num_dfi_radar = Model%num_dfi_radar+1
+ Model%ix_dfi_radar(i) = Model%num_dfi_radar
+ else
+ Model%ix_dfi_radar(i) = -1
+ endif
+ enddo
+
+ if(Model%num_dfi_radar>0) then
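+ ! If only one of the two limits was provided, mirror it into a symmetric (-|x|, +|x|) range;
+ ! if neither was provided, fall back to the internal defaults below.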
+ if(radar_tten_limits(1)==limit_unspecified) then
+ if(radar_tten_limits(2)==limit_unspecified) then
+ radar_tten_limits(1) = -19
+ radar_tten_limits(2) = 19
+ if(Model%me==Model%master) then
+ write(0,*) 'Warning: using internal defaults for radar_tten_limits. If the oceans boil, try different values.'
+ write(0,'(A,F12.4,A)') 'radar_tten_limits(1) = ',radar_tten_limits(1),' <-- lower limit'
+ write(0,'(A,F12.4,A)') 'radar_tten_limits(2) = ',radar_tten_limits(2),' <-- upper limit'
+ endif
+ else
+ radar_tten_limits(1) = -abs(radar_tten_limits(2))
+ radar_tten_limits(2) = abs(radar_tten_limits(2))
+ endif
+ else if(radar_tten_limits(2)==limit_unspecified) then
+ radar_tten_limits(1) = -abs(radar_tten_limits(1))
+ radar_tten_limits(2) = abs(radar_tten_limits(1))
+ else if(radar_tten_limits(1)>radar_tten_limits(2)) then
+ if(Model%me==Model%master) then
+ write(0,*) 'Error: radar_tten_limits lower limit is higher than upper!'
+ write(0,'(A,F12.4,A)') 'radar_tten_limits(1) = ',radar_tten_limits(1),' <-- lower limit'
+ write(0,'(A,F12.4,A)') 'radar_tten_limits(2) = ',radar_tten_limits(2),' <-- upper limit'
+ write(0,*) "If you do not want me to apply the prescribed tendencies, just say so! Remove fh_dfi_radar from your namelist."
+ stop
+ endif
+ else
+ !o! Rejoice !o! Radar_tten_limits had lower and upper bounds.
+ endif
+ Model%radar_tten_limits = radar_tten_limits
+
+ if(Model%do_cap_suppress) then
+ if(Model%me==Model%master .and. Model%imfdeepcnv>=0) then
+ if(Model%imfdeepcnv/=3) then
+ write(0,*) 'Warning: untested configuration in use! Radar-derived convection suppression is only supported for the GF deep scheme. That feature will be inactive, but microphysics tendencies will still be enabled. This combination is untested. Beware!'
+ else
+ write(0,*) 'Warning: experimental configuration in use! Radar-derived convection suppression is experimental (GF deep scheme with fh_dfi_radar).'
+ endif
+ endif
+ endif
+ endif
+
+ end subroutine control_initialize_radar_tten
!---------------------------
! GFS_control%init_chemistry
@@ -5426,6 +5558,9 @@ subroutine control_print(Model)
!--- interface variables
class(GFS_control_type) :: Model
+!--- local variables
+ integer :: i
+
if (Model%me == Model%master) then
print *, ' '
print *, 'basic control parameters'
@@ -5593,6 +5728,18 @@ subroutine control_print(Model)
print *, ' icloud : ', Model%icloud
print *, ' '
endif
+ if (Model%num_dfi_radar>0) then
+ print *, ' num_dfi_radar : ', Model%num_dfi_radar
+ print *, ' do_cap_suppress : ', Model%do_cap_suppress
+ do i = 1, dfi_radar_max_intervals+1
+8888 format(' fh_dfi_radar(',I0,') :',F12.4)
+ if(Model%fh_dfi_radar(i)>-1e10) then
+ print 8888,i,Model%fh_dfi_radar(i)
+ endif
+ enddo
+9999 format(' radar_tten_limits: ', F12.4, ' ... ',F12.4)
+ print 9999,Model%radar_tten_limits(1),Model%radar_tten_limits(2)
+ endif
print *, 'land/surface model parameters'
print *, ' lsm : ', Model%lsm
print *, ' lsoil : ', Model%lsoil
@@ -5780,6 +5927,8 @@ subroutine control_print(Model)
print *, ' lndp_type : ', Model%lndp_type
print *, ' n_var_lndp : ', Model%n_var_lndp
print *, ' lndp_each_step : ', Model%lndp_each_step
+ print *, ' do_spp : ', Model%do_spp
+ print *, ' n_var_spp : ', Model%n_var_spp
print *, ' '
print *, 'cellular automata'
print *, ' nca : ', Model%nca
@@ -5872,6 +6021,7 @@ subroutine control_print(Model)
print *, ' sec : ', Model%sec
print *, ' first_time_step : ', Model%first_time_step
print *, ' restart : ', Model%restart
+ print *, ' lsm_cold_start : ', Model%lsm_cold_start
print *, ' hydrostatic : ', Model%hydrostatic
endif
@@ -5972,6 +6122,19 @@ subroutine tbd_create (Tbd, IM, Model)
Tbd%icsdlw = zero
endif
+!--- DFI radar forcing
+ nullify(Tbd%dfi_radar_tten)
+ nullify(Tbd%cap_suppress)
+ if(Model%num_dfi_radar>0) then
+ allocate(Tbd%dfi_radar_tten(IM,Model%levs,Model%num_dfi_radar))
+ Tbd%dfi_radar_tten = -20.0
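+ ! note: -20 K s-1 lies outside the default radar_tten_limits range of (-19, 19) set in control_initialize_radar_tten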
+ Tbd%dfi_radar_tten(:,1,:) = zero
+ if(Model%do_cap_suppress) then
+ allocate(Tbd%cap_suppress(IM,Model%num_dfi_radar))
+ Tbd%cap_suppress(:,:) = zero
+ endif
+ endif
+
!--- ozone and stratosphere h2o needs
allocate (Tbd%ozpl (IM,levozp,oz_coeff))
allocate (Tbd%h2opl (IM,levh2o,h2o_coeff))
@@ -6819,6 +6982,7 @@ subroutine diag_phys_zero (Diag, Model, linit, iauwindow_center)
Diag%tdomzr = zero
Diag%tdomip = zero
Diag%tdoms = zero
+ Diag%zmtnblck = zero
if(Model%lsm == Model%lsm_noahmp)then
Diag%paha = zero
@@ -7499,9 +7663,7 @@ subroutine interstitial_setup_tracers(Interstitial, Model)
endif
if (Model%cplchm) then
- ! Only Zhao/Carr/Sundqvist and GFDL microphysics schemes are supported
- ! when coupling with chemistry. PBL diffusion of aerosols is only supported
- ! for GFDL microphysics and MG microphysics.
+ ! Only the following microphysics schemes are supported with coupled chemistry
if (Model%imp_physics == Model%imp_physics_zhao_carr) then
Interstitial%nvdiff = 3
elseif (Model%imp_physics == Model%imp_physics_mg) then
@@ -7512,8 +7674,14 @@ subroutine interstitial_setup_tracers(Interstitial, Model)
endif
elseif (Model%imp_physics == Model%imp_physics_gfdl) then
Interstitial%nvdiff = 7
+ elseif (Model%imp_physics == Model%imp_physics_thompson) then
+ if (Model%ltaerosol) then
+ Interstitial%nvdiff = 12
+ else
+ Interstitial%nvdiff = 9
+ endif
else
- write(0,*) "Only Zhao/Carr/Sundqvist and GFDL microphysics schemes are supported when coupling with chemistry"
+ write(0,*) "Selected microphysics scheme is not supported when coupling with chemistry"
stop
endif
if (Interstitial%trans_aero) Interstitial%nvdiff = Interstitial%nvdiff + Model%ntchm
diff --git a/ccpp/data/GFS_typedefs.meta b/ccpp/data/GFS_typedefs.meta
index a45b55866..3c3b31c0c 100644
--- a/ccpp/data/GFS_typedefs.meta
+++ b/ccpp/data/GFS_typedefs.meta
@@ -2244,10 +2244,50 @@
type = real
kind = kind_phys
active = (flag_for_stochastic_skeb_option)
+[spp_wts_pbl]
+ standard_name = spp_weights_for_pbl_scheme
+ long_name = spp weights for pbl scheme
+ units = 1
+ dimensions = (horizontal_loop_extent,vertical_layer_dimension)
+ type = real
+ kind = kind_phys
+ active = (do_stochastically_perturbed_parameterizations)
+[spp_wts_sfc]
+ standard_name = spp_weights_for_surface_layer_scheme
+ long_name = spp weights for surface layer scheme
+ units = 1
+ dimensions = (horizontal_loop_extent,vertical_layer_dimension)
+ type = real
+ kind = kind_phys
+ active = (do_stochastically_perturbed_parameterizations)
+[spp_wts_mp]
+ standard_name = spp_weights_for_microphysics_scheme
+ long_name = spp weights for microphysics scheme
+ units = 1
+ dimensions = (horizontal_loop_extent,vertical_layer_dimension)
+ type = real
+ kind = kind_phys
+ active = (do_stochastically_perturbed_parameterizations)
+[spp_wts_gwd]
+ standard_name = spp_weights_for_gravity_wave_drag_scheme
+ long_name = spp weights for gravity wave drag scheme
+ units = 1
+ dimensions = (horizontal_loop_extent,vertical_layer_dimension)
+ type = real
+ kind = kind_phys
+ active = (do_stochastically_perturbed_parameterizations)
+[spp_wts_rad]
+ standard_name = spp_weights_for_radiation_scheme
+ long_name = spp weights for radiation scheme
+ units = 1
+ dimensions = (horizontal_loop_extent,vertical_layer_dimension)
+ type = real
+ kind = kind_phys
+ active = (do_stochastically_perturbed_parameterizations)
[sfc_wts]
standard_name = surface_stochastic_weights_from_coupled_process
long_name = weights for stochastic surface physics perturbation
- units = none
+ units = 1
dimensions = (horizontal_loop_extent,number_of_perturbed_land_surface_variables)
type = real
kind = kind_phys
@@ -2519,13 +2559,13 @@
standard_name = sigma_pressure_hybrid_coordinate_a_coefficient
long_name = a parameter for sigma pressure level calculations
units = Pa
- dimensions = (vertical_interface_dimension_for_radiation)
+ dimensions = (vertical_interface_dimension)
type = real
[bk]
standard_name = sigma_pressure_hybrid_coordinate_b_coefficient
long_name = b parameter for sigma pressure level calculations
units = none
- dimensions = (vertical_interface_dimension_for_radiation)
+ dimensions = (vertical_interface_dimension)
type = real
[levsp1]
standard_name = vertical_interface_dimension
@@ -3381,6 +3421,24 @@
dimensions = ()
type = real
kind = kind_phys
+[num_dfi_radar]
+ standard_name = number_of_radar_derived_temperature_or_convection_suppression_intervals
+ long_name = number of time ranges with radar-derived microphysics temperature tendencies or radar-derived convection suppression
+ units = count
+ dimensions = ()
+ type = integer
+[dfi_radar_max_intervals]
+ standard_name = maximum_number_of_radar_derived_temperature_or_convection_suppression_intervals
+ long_name = maximum allowed number of time ranges with radar-derived microphysics temperature tendencies or radar-derived convection suppression
+ units = count
+ dimensions = ()
+ type = integer
+[dfi_radar_max_intervals_plus_one]
+ standard_name = maximum_number_of_radar_derived_temperature_or_convection_suppression_intervals_plus_one
+ long_name = one more than the maximum allowed number of time ranges with radar-derived microphysics temperature tendencies or radar-derived convection suppression
+ units = count
+ dimensions = ()
+ type = integer
[effr_in]
standard_name = flag_for_cloud_effective_radii
long_name = flag for cloud effective radii calculations in GFDL microphysics
@@ -3459,6 +3517,32 @@
units = flag
dimensions = ()
type = logical
+[radar_tten_limits]
+ standard_name = allowed_bounds_of_radar_prescribed_tendencies
+ long_name = allowed bounds of prescribed microphysics temperature tendencies
+ units = K s-1
+ dimensions = (2)
+ type = real
+ kind = kind_phys
+[do_cap_suppress]
+ standard_name = flag_for_radar_derived_convection_suppression
+ long_name = flag for radar-derived convection suppression
+ units = flag
+ dimensions = ()
+ type = logical
+[fh_dfi_radar]
+ standard_name = forecast_lead_times_bounding_radar_derived_temperature_or_convection_suppression_intervals
+ long_name = forecast lead times bounding radar derived temperature or convection suppression intervals
+ units = h
+ dimensions = (maximum_number_of_radar_derived_temperature_or_convection_suppression_intervals_plus_one)
+ type = real
+ kind = kind_phys
+[ix_dfi_radar]
+ standard_name = indices_with_radar_derived_temperature_or_convection_suppression_data
+ long_name = indices with radar derived temperature or convection suppression data
+ units = index
+ dimensions = (maximum_number_of_radar_derived_temperature_or_convection_suppression_intervals)
+ type = integer
[shoc_parm(1)]
standard_name = pressure_threshold_for_increased_tke_dissipation
long_name = pressure below which extra TKE diss. is applied in SHOC
@@ -4566,6 +4650,12 @@
units = flag
dimensions = ()
type = logical
+[do_spp]
+ standard_name = do_stochastically_perturbed_parameterizations
+ long_name = flag for stochastic spp option
+ units = flag
+ dimensions = ()
+ type = logical
[lndp_type]
standard_name = control_for_stochastic_land_surface_perturbation
long_name = index for stochastic land surface perturbations type
@@ -4594,6 +4684,56 @@
type = character
kind = len=3
active = (control_for_stochastic_land_surface_perturbation /= 0)
+[n_var_spp]
+ standard_name = number_of_perturbed_spp_schemes
+ long_name = number of perturbed spp schemes
+ units = count
+ dimensions = ()
+ type = integer
+[spp_prt_list]
+ standard_name = magnitude_of_spp_perturbations
+ long_name = magnitude of spp perturbations
+ units = 1
+ dimensions = (number_of_perturbed_spp_schemes)
+ type = real
+ kind = kind_phys
+[spp_var_list]
+ standard_name = perturbed_spp_schemes
+ long_name = perturbed spp schemes
+ units = none
+ dimensions = (number_of_perturbed_spp_schemes)
+ type = character
+ kind = len=3
+[spp_pbl]
+ standard_name = control_for_pbl_spp_perturbations
+ long_name = control for pbl spp perturbations
+ units = count
+ dimensions = ()
+ type = integer
+[spp_sfc]
+ standard_name = control_for_surface_layer_spp_perturbations
+ long_name = control for surface layer spp perturbations
+ units = count
+ dimensions = ()
+ type = integer
+[spp_mp]
+ standard_name = control_for_microphysics_spp_perturbations
+ long_name = control for microphysics spp perturbations
+ units = count
+ dimensions = ()
+ type = integer
+[spp_rad]
+ standard_name = control_for_radiation_spp_perturbations
+ long_name = control for radiation spp perturbations
+ units = count
+ dimensions = ()
+ type = integer
+[spp_gwd]
+ standard_name = control_for_gravity_wave_drag_spp_perturbations
+ long_name = control for gravity wave drag spp perturbations
+ units = count
+ dimensions = ()
+ type = integer
[ntrac]
standard_name = number_of_tracers
long_name = number of tracers
@@ -4720,6 +4860,12 @@
units = index
dimensions = ()
type = integer
+[index_of_process_dfi_radar]
+ standard_name = index_of_radar_derived_microphysics_temperature_forcing_in_cumulative_change_index
+ long_name = index of radar-derived microphysics temperature forcing in second dimension of array cumulative change index
+ units = index
+ dimensions = ()
+ type = integer
[index_of_process_physics]
standard_name = index_of_all_physics_process_in_cumulative_change_index
long_name = index of all physics transport process in second dimension of array cumulative change index
@@ -5058,7 +5204,7 @@
dimensions = ()
type = integer
[ncnvwind]
- standard_name = index_of_enhancement_to_wind_speed_at_surface_adjacent_layer_due_to_convectionin_in_xy_dimensioned_restart_array
+ standard_name = index_of_enhancement_to_wind_speed_at_surface_adjacent_layer_due_to_convection_in_xy_dimensioned_restart_array
long_name = the index of surface wind enhancement due to convection in phy f2d
units =
dimensions = ()
@@ -5180,6 +5326,12 @@
units = flag
dimensions = ()
type = logical
+[lsm_cold_start]
+ standard_name = do_lsm_cold_start
+ long_name = flag to signify LSM is cold-started
+ units = flag
+ dimensions = ()
+ type = logical
[hydrostatic]
standard_name = flag_for_hydrostatic_solver
long_name = flag for hydrostatic solver from dynamics
@@ -5228,7 +5380,7 @@
standard_name = sigma_pressure_hybrid_vertical_coordinate
long_name = vertical sigma coordinate for radiation initialization
units = none
- dimensions = (vertical_interface_dimension_for_radiation)
+ dimensions = (vertical_interface_dimension)
type = real
kind = kind_phys
[dxinv]
@@ -5902,14 +6054,14 @@
type = real
kind = kind_phys
active = (index_of_surface_air_pressure_on_previous_timestep_in_xyz_dimensioned_restart_array > 0)
-[phy_f2d(:,index_of_enhancement_to_wind_speed_at_surface_adjacent_layer_due_to_convectionin_in_xy_dimensioned_restart_array)]
+[phy_f2d(:,index_of_enhancement_to_wind_speed_at_surface_adjacent_layer_due_to_convection_in_xy_dimensioned_restart_array)]
standard_name = enhancement_to_wind_speed_at_surface_adjacent_layer_due_to_convection
long_name = surface wind enhancement due to convection
units = m s-1
dimensions = (horizontal_loop_extent)
type = real
kind = kind_phys
- active = (index_of_enhancement_to_wind_speed_at_surface_adjacent_layer_due_to_convectionin_in_xy_dimensioned_restart_array > 0)
+ active = (index_of_enhancement_to_wind_speed_at_surface_adjacent_layer_due_to_convection_in_xy_dimensioned_restart_array > 0)
[phy_f3d(:,:,index_of_air_temperature_two_timesteps_back_in_xyz_dimensioned_restart_array)]
standard_name = air_temperature_two_timesteps_back
long_name = air temperature two timesteps back
@@ -6244,6 +6396,22 @@
type = real
kind = kind_phys
active = (flag_for_mellor_yamada_janjic_surface_layer_scheme .or. flag_for_mellor_yamada_janjic_pbl_scheme)
+[dfi_radar_tten]
+ standard_name = radar_derived_microphysics_temperature_tendency
+ long_name = radar-derived microphysics temperature tendency
+ units = K s-1
+ dimensions = (horizontal_loop_extent,vertical_layer_dimension,number_of_radar_derived_temperature_or_convection_suppression_intervals)
+ type = real
+ kind = kind_phys
+ active = (number_of_radar_derived_temperature_or_convection_suppression_intervals>0)
+[cap_suppress]
+ standard_name = radar_derived_convection_suppression
+ long_name = radar-derived convection suppression
+ units = unitless
+ dimensions = (horizontal_loop_extent,number_of_radar_derived_temperature_or_convection_suppression_intervals)
+ type = real
+ kind = kind_phys
+ active = (number_of_radar_derived_temperature_or_convection_suppression_intervals>0 .and. flag_for_radar_derived_convection_suppression)
########################################################################
[ccpp-table-properties]
diff --git a/ccpp/driver/GFS_diagnostics.F90 b/ccpp/driver/GFS_diagnostics.F90
index 8d9e67cdb..6e4b62337 100644
--- a/ccpp/driver/GFS_diagnostics.F90
+++ b/ccpp/driver/GFS_diagnostics.F90
@@ -138,7 +138,7 @@ subroutine GFS_externaldiag_populate (ExtDiag, Model, Statein, Stateout, Sfcprop
type(GFS_init_type), intent(in) :: Init_parm
!--- local variables
- integer :: idt, idx, num, nb, nblks, NFXR, idtend, ichem, itrac, iprocess
+ integer :: idt, idx, num, nb, nblks, NFXR, idtend, ichem, itrac, iprocess, i
character(len=2) :: xtra
real(kind=kind_phys), parameter :: cn_one = 1._kind_phys
real(kind=kind_phys), parameter :: cn_100 = 100._kind_phys
@@ -2296,6 +2296,71 @@ subroutine GFS_externaldiag_populate (ExtDiag, Model, Statein, Stateout, Sfcprop
enddo
endif
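+ ! SPP perturbation-weight diagnostics: one 3D field per perturbed scheme (pbl, sfc, mp, gwd, rad)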
+ if (Model%do_spp) then
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ ExtDiag(idx)%name = 'spp_wts_pbl'
+ ExtDiag(idx)%desc = 'spp pbl perturbation wts'
+ ExtDiag(idx)%unit = 'm/s'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Coupling(nb)%spp_wts_pbl(:,:)
+ enddo
+ endif
+
+ if (Model%do_spp) then
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ ExtDiag(idx)%name = 'spp_wts_sfc'
+ ExtDiag(idx)%desc = 'spp sfc perturbation wts'
+ ExtDiag(idx)%unit = 'm/s'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Coupling(nb)%spp_wts_sfc(:,:)
+ enddo
+ endif
+
+ if (Model%do_spp) then
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ ExtDiag(idx)%name = 'spp_wts_mp'
+ ExtDiag(idx)%desc = 'spp mp perturbation wts'
+ ExtDiag(idx)%unit = 'm/s'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Coupling(nb)%spp_wts_mp(:,:)
+ enddo
+ endif
+
+ if (Model%do_spp) then
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ ExtDiag(idx)%name = 'spp_wts_gwd'
+ ExtDiag(idx)%desc = 'spp gwd perturbation wts'
+ ExtDiag(idx)%unit = 'm/s'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Coupling(nb)%spp_wts_gwd(:,:)
+ enddo
+ endif
+
+ if (Model%do_spp) then
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ ExtDiag(idx)%name = 'spp_wts_rad'
+ ExtDiag(idx)%desc = 'spp rad perturbation wts'
+ ExtDiag(idx)%unit = 'm/s'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Coupling(nb)%spp_wts_rad(:,:)
+ enddo
+ endif
+
if (Model%lndp_type /= 0) then
idx = idx + 1
ExtDiag(idx)%axes = 2
@@ -3476,39 +3541,58 @@ subroutine GFS_externaldiag_populate (ExtDiag, Model, Statein, Stateout, Sfcprop
enddo
end if thompson_extended_diagnostics
- !! Cloud effective radii from Microphysics
- !if (Model%imp_physics == Model%imp_physics_thompson .or. Model%imp_physics == Model%imp_physics_wsm6 .or. Model%imp_physics == Model%imp_physics_fer_hires) then
- ! idx = idx + 1
- ! ExtDiag(idx)%axes = 3
- ! ExtDiag(idx)%name = 'cleffr'
- ! ExtDiag(idx)%desc = 'effective radius of cloud liquid water particle'
- ! ExtDiag(idx)%unit = 'um'
- ! ExtDiag(idx)%mod_name = 'gfs_phys'
- ! allocate (ExtDiag(idx)%data(nblks))
- ! do nb = 1,nblks
- ! ExtDiag(idx)%data(nb)%var3 => Tbd(nb)%phy_f3d(:,:,Model%nleffr)
- ! enddo
- ! idx = idx + 1
- ! ExtDiag(idx)%axes = 3
- ! ExtDiag(idx)%name = 'cieffr'
- ! ExtDiag(idx)%desc = 'effective radius of stratiform cloud ice particle in um'
- ! ExtDiag(idx)%unit = 'um'
- ! ExtDiag(idx)%mod_name = 'gfs_phys'
- ! allocate (ExtDiag(idx)%data(nblks))
- ! do nb = 1,nblks
- ! ExtDiag(idx)%data(nb)%var3 => Tbd(nb)%phy_f3d(:,:,Model%nieffr)
- ! enddo
- ! idx = idx + 1
- ! ExtDiag(idx)%axes = 3
- ! ExtDiag(idx)%name = 'cseffr'
- ! ExtDiag(idx)%desc = 'effective radius of stratiform cloud snow particle in um'
- ! ExtDiag(idx)%unit = 'um'
- ! ExtDiag(idx)%mod_name = 'gfs_phys'
- ! allocate (ExtDiag(idx)%data(nblks))
- ! do nb = 1,nblks
- ! ExtDiag(idx)%data(nb)%var3 => Tbd(nb)%phy_f3d(:,:,Model%nseffr)
- ! enddo
- !endif
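+ ! Radar-prescribed temperature tendency diagnostics: one 3D field per active interval,
+ ! named radar_tten, radar_tten_2, ..., radar_tten_<num_dfi_radar>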
+ do i=1,Model%num_dfi_radar
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ if(i>1) then
+ write(ExtDiag(idx)%name,'(A,I0)') 'radar_tten_',i
+ else
+ ExtDiag(idx)%name = 'radar_tten'
+ endif
+ write(ExtDiag(idx)%desc,'(A,I0,A,I0)') 'temperature tendency due to dfi radar tendencies ',i,' of ',Model%num_dfi_radar
+ ExtDiag(idx)%unit = 'K s-1'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ ExtDiag(idx)%time_avg = .FALSE.
+
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Tbd(nb)%dfi_radar_tten(:,:,i)
+ enddo
+ enddo
+
+ ! Cloud effective radii from Microphysics
+ if (Model%imp_physics == Model%imp_physics_thompson .or. Model%imp_physics == Model%imp_physics_fer_hires) then
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ ExtDiag(idx)%name = 'cleffr'
+ ExtDiag(idx)%desc = 'effective radius of cloud liquid water particle'
+ ExtDiag(idx)%unit = 'um'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Tbd(nb)%phy_f3d(:,:,Model%nleffr)
+ enddo
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ ExtDiag(idx)%name = 'cieffr'
+ ExtDiag(idx)%desc = 'effective radius of stratiform cloud ice particle in um'
+ ExtDiag(idx)%unit = 'um'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Tbd(nb)%phy_f3d(:,:,Model%nieffr)
+ enddo
+ idx = idx + 1
+ ExtDiag(idx)%axes = 3
+ ExtDiag(idx)%name = 'cseffr'
+ ExtDiag(idx)%desc = 'effective radius of stratiform cloud snow particle in um'
+ ExtDiag(idx)%unit = 'um'
+ ExtDiag(idx)%mod_name = 'gfs_phys'
+ allocate (ExtDiag(idx)%data(nblks))
+ do nb = 1,nblks
+ ExtDiag(idx)%data(nb)%var3 => Tbd(nb)%phy_f3d(:,:,Model%nseffr)
+ enddo
+ endif
!MYNN
if (Model%do_mynnedmf) then
diff --git a/ccpp/driver/GFS_restart.F90 b/ccpp/driver/GFS_restart.F90
index 07d52a8f0..1ffaed4dc 100644
--- a/ccpp/driver/GFS_restart.F90
+++ b/ccpp/driver/GFS_restart.F90
@@ -60,7 +60,7 @@ subroutine GFS_restart_populate (Restart, Model, Statein, Stateout, Sfcprop, &
!--- local variables
integer :: idx, ndiag_rst
- integer :: ndiag_idx(20)
+ integer :: ndiag_idx(20), itime
integer :: nblks, num, nb, max_rstrt, offset
character(len=2) :: c2 = ''
@@ -115,14 +115,20 @@ subroutine GFS_restart_populate (Restart, Model, Statein, Stateout, Sfcprop, &
endif
! MYNN SFC
if (Model%do_mynnsfclay) then
- Restart%num2d = Restart%num2d + 1
+ Restart%num2d = Restart%num2d + 13
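+ ! uustar plus the twelve additional MYNN 2D surface-layer fields registered below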
endif
! Thompson aerosol-aware
if (Model%imp_physics == Model%imp_physics_thompson .and. Model%ltaerosol) then
Restart%num2d = Restart%num2d + 2
endif
+ if (Model%do_cap_suppress .and. Model%num_dfi_radar>0) then
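+ ! one extra 2D restart field (cap_suppress*) per active radar interval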
+ Restart%num2d = Restart%num2d + Model%num_dfi_radar
+ endif
Restart%num3d = Model%ntot3d
+ if (Model%num_dfi_radar>0) then
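+ ! one extra 3D restart field (radar_tten*) per active radar interval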
+ Restart%num3d = Restart%num3d + Model%num_dfi_radar
+ endif
if(Model%lrefres) then
Restart%num3d = Model%ntot3d+1
endif
@@ -134,7 +140,7 @@ subroutine GFS_restart_populate (Restart, Model, Statein, Stateout, Sfcprop, &
if (Model%imfdeepcnv == 3) then
Restart%num3d = Restart%num3d + 3
endif
- ! MYNN PBL
+ ! MYNN PBL
if (Model%do_mynnedmf) then
Restart%num3d = Restart%num3d + 9
endif
@@ -309,6 +315,66 @@ subroutine GFS_restart_populate (Restart, Model, Statein, Stateout, Sfcprop, &
do nb = 1,nblks
Restart%data(nb,num)%var2p => Sfcprop(nb)%uustar(:)
enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_hpbl'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Tbd(nb)%hpbl(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_ustm'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%ustm(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_zol'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%zol(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_mol'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%mol(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_flhc'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%flhc(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_flqc'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%flqc(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_chs2'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%chs2(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_cqs2'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%cqs2(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_lh'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%lh(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_hflx'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%hflx(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_evap'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%evap(:)
+ enddo
+ num = num + 1
+ Restart%name2d(num) = 'mynn_2d_qss'
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Sfcprop(nb)%qss(:)
+ enddo
endif
! Thompson aerosol-aware
if (Model%imp_physics == Model%imp_physics_thompson .and. Model%ltaerosol) then
@@ -324,6 +390,23 @@ subroutine GFS_restart_populate (Restart, Model, Statein, Stateout, Sfcprop, &
enddo
endif
+ ! Convection suppression
+ if (Model%do_cap_suppress .and. Model%num_dfi_radar > 0) then
+ do itime=1,Model%dfi_radar_max_intervals
+ if(Model%ix_dfi_radar(itime)>0) then
+ num = num + 1
+ if(itime==1) then
+ Restart%name2d(num) = 'cap_suppress'
+ else
+ write(Restart%name2d(num),'("cap_suppress_",I0)') itime
+ endif
+ do nb = 1,nblks
+ Restart%data(nb,num)%var2p => Tbd(nb)%cap_suppress(:,Model%ix_dfi_radar(itime))
+ enddo
+ endif
+ enddo
+ endif
+
!--- phy_f3d variables
do num = 1,Model%ntot3d
!--- set the variable name
@@ -422,6 +505,24 @@ subroutine GFS_restart_populate (Restart, Model, Statein, Stateout, Sfcprop, &
enddo
endif
+ ! Radar-derived microphysics temperature tendencies
+ if (Model%num_dfi_radar > 0) then
+ do itime=1,Model%dfi_radar_max_intervals
+ if(Model%ix_dfi_radar(itime)>0) then
+ num = num + 1
+ if(itime==1) then
+ Restart%name3d(num) = 'radar_tten'
+ else
+ write(Restart%name3d(num),'("radar_tten_",I0)') itime
+ endif
+ do nb = 1,nblks
+ Restart%data(nb,num)%var3p => Tbd(nb)%dfi_radar_tten( &
+ :,:,Model%ix_dfi_radar(itime))
+ enddo
+ endif
+ enddo
+ endif
+
end subroutine GFS_restart_populate
end module GFS_restart
diff --git a/ccpp/framework b/ccpp/framework
index 64b5afd13..a55457fe3 160000
--- a/ccpp/framework
+++ b/ccpp/framework
@@ -1 +1 @@
-Subproject commit 64b5afd1352d709f3b72734bf960e36024a838d3
+Subproject commit a55457fe3ef66e1651c94f99e72aba3362b105a2
diff --git a/ccpp/physics b/ccpp/physics
index cb33319a0..f0d6dcc7c 160000
--- a/ccpp/physics
+++ b/ccpp/physics
@@ -1 +1 @@
-Subproject commit cb33319a0a42b8b9b8a25b59f19014bad6720b7d
+Subproject commit f0d6dcc7cbc583974e555fa3990486ca1a225a34
diff --git a/ccpp/suites/suite_FV3_GFS_v16_coupled_p8.xml b/ccpp/suites/suite_FV3_GFS_v16_coupled_p8.xml
new file mode 100644
index 000000000..423d37f6d
--- /dev/null
+++ b/ccpp/suites/suite_FV3_GFS_v16_coupled_p8.xml
@@ -0,0 +1,95 @@
+
+
+
+
+
+
+ fv_sat_adj
+
+
+
+
+ GFS_time_vary_pre
+ GFS_rrtmg_setup
+ GFS_rad_time_vary
+ GFS_phys_time_vary
+
+
+
+
+ GFS_suite_interstitial_rad_reset
+ GFS_rrtmg_pre
+ GFS_radiation_surface
+ rrtmg_sw_pre
+ rrtmg_sw
+ rrtmg_sw_post
+ rrtmg_lw_pre
+ rrtmg_lw
+ rrtmg_lw_post
+ GFS_rrtmg_post
+
+
+
+
+ GFS_suite_interstitial_phys_reset
+ GFS_suite_stateout_reset
+ get_prs_fv3
+ GFS_suite_interstitial_1
+ GFS_surface_generic_pre
+ GFS_surface_composites_pre
+ dcyc2t3
+ GFS_surface_composites_inter
+ GFS_suite_interstitial_2
+
+
+
+ sfc_diff
+ GFS_surface_loop_control_part1
+ sfc_nst_pre
+ sfc_nst
+ sfc_nst_post
+ noahmpdrv
+ sfc_cice
+ sfc_sice
+ GFS_surface_loop_control_part2
+
+
+
+ GFS_surface_composites_post
+ sfc_diag
+ sfc_diag_post
+ GFS_surface_generic_post
+ GFS_PBL_generic_pre
+ satmedmfvdifq
+ GFS_PBL_generic_post
+ GFS_GWD_generic_pre
+ ugwpv1_gsldrag
+ ugwpv1_gsldrag_post
+ GFS_GWD_generic_post
+ GFS_suite_stateout_update
+ ozphys_2015
+ h2ophys
+ get_phi_fv3
+ GFS_suite_interstitial_3
+ GFS_DCNV_generic_pre
+ samfdeepcnv
+ GFS_DCNV_generic_post
+ GFS_SCNV_generic_pre
+ samfshalcnv
+ GFS_SCNV_generic_post
+ GFS_suite_interstitial_4
+ cnvc90
+ GFS_MP_generic_pre
+ gfdl_cloud_microphys
+ GFS_MP_generic_post
+ maximum_hourly_diagnostics
+ phys_tend
+
+
+
+
+ GFS_stochastics
+
+
+
+
diff --git a/ccpp/suites/suite_FV3_GFS_v16_p8.xml b/ccpp/suites/suite_FV3_GFS_v16_p8.xml
index ef860e66d..8828f503a 100644
--- a/ccpp/suites/suite_FV3_GFS_v16_p8.xml
+++ b/ccpp/suites/suite_FV3_GFS_v16_p8.xml
@@ -10,7 +10,7 @@
GFS_time_vary_pre
- GFS_rrtmgp_setup
+ GFS_rrtmg_setup
GFS_rad_time_vary
GFS_phys_time_vary
@@ -18,25 +18,15 @@
GFS_suite_interstitial_rad_reset
- GFS_rrtmgp_pre
+ GFS_rrtmg_pre
GFS_radiation_surface
- GFS_rrtmgp_gfdlmp_pre
- GFS_rrtmgp_cloud_overlap_pre
- GFS_cloud_diagnostics
- GFS_rrtmgp_sw_pre
- rrtmgp_sw_gas_optics
- rrtmgp_sw_aerosol_optics
- rrtmgp_sw_cloud_optics
- rrtmgp_sw_cloud_sampling
- rrtmgp_sw_rte
- GFS_rrtmgp_sw_post
- rrtmgp_lw_pre
- rrtmgp_lw_gas_optics
- rrtmgp_lw_aerosol_optics
- rrtmgp_lw_cloud_optics
- rrtmgp_lw_cloud_sampling
- rrtmgp_lw_rte
- GFS_rrtmgp_lw_post
+ rrtmg_sw_pre
+ rrtmg_sw
+ rrtmg_sw_post
+ rrtmg_lw_pre
+ rrtmg_lw
+ rrtmg_lw_post
+ GFS_rrtmg_post
diff --git a/ccpp/suites/suite_FV3_HAFS_v0_hwrf_thompson.xml b/ccpp/suites/suite_FV3_HAFS_v0_thompson_tedmf_gfdlsf.xml
similarity index 93%
rename from ccpp/suites/suite_FV3_HAFS_v0_hwrf_thompson.xml
rename to ccpp/suites/suite_FV3_HAFS_v0_thompson_tedmf_gfdlsf.xml
index 2bc2f8592..652c5eabb 100644
--- a/ccpp/suites/suite_FV3_HAFS_v0_hwrf_thompson.xml
+++ b/ccpp/suites/suite_FV3_HAFS_v0_thompson_tedmf_gfdlsf.xml
@@ -1,6 +1,6 @@
-
+
@@ -13,17 +13,15 @@
GFS_suite_interstitial_rad_reset
-
GFS_rrtmg_pre
GFS_radiation_surface
- rrtmg_lw_pre
- rrtmg_lw
- rrtmg_lw_post
rrtmg_sw_pre
rrtmg_sw
rrtmg_sw_post
+ rrtmg_lw_pre
+ rrtmg_lw
+ rrtmg_lw_post
GFS_rrtmg_post
-
@@ -45,7 +43,7 @@
sfc_nst_pre
sfc_nst
sfc_nst_post
- noahmpdrv
+ lsm_noah
sfc_sice
GFS_surface_loop_control_part2
@@ -56,7 +54,7 @@
sfc_diag_post
GFS_surface_generic_post
GFS_PBL_generic_pre
- hedmf
+ satmedmfvdifq
GFS_PBL_generic_post
GFS_GWD_generic_pre
cires_ugwp
@@ -65,9 +63,9 @@
GFS_suite_stateout_update
ozphys_2015
h2ophys
- GFS_DCNV_generic_pre
get_phi_fv3
GFS_suite_interstitial_3
+ GFS_DCNV_generic_pre
samfdeepcnv
GFS_DCNV_generic_post
GFS_SCNV_generic_pre
@@ -77,7 +75,11 @@
cnvc90
GFS_MP_generic_pre
mp_thompson_pre
+
+
mp_thompson
+
+
mp_thompson_post
GFS_MP_generic_post
maximum_hourly_diagnostics
diff --git a/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_ugwpv1.xml b/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_ugwpv1.xml
new file mode 100644
index 000000000..924398609
--- /dev/null
+++ b/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_ugwpv1.xml
@@ -0,0 +1,94 @@
+
+
+
+
+
+
+ GFS_time_vary_pre
+ GFS_rrtmg_setup
+ GFS_rad_time_vary
+ GFS_phys_time_vary
+
+
+
+
+ GFS_suite_interstitial_rad_reset
+ sgscloud_radpre
+ GFS_rrtmg_pre
+ GFS_radiation_surface
+ rrtmg_sw_pre
+ rrtmg_sw
+ rrtmg_sw_post
+ rrtmg_lw_pre
+ rrtmg_lw
+ sgscloud_radpost
+ rrtmg_lw_post
+ GFS_rrtmg_post
+
+
+
+
+ GFS_suite_interstitial_phys_reset
+ GFS_suite_stateout_reset
+ get_prs_fv3
+ GFS_suite_interstitial_1
+ GFS_surface_generic_pre
+ GFS_surface_composites_pre
+ dcyc2t3
+ GFS_surface_composites_inter
+ GFS_suite_interstitial_2
+
+
+
+ sfc_diff
+ GFS_surface_loop_control_part1
+ sfc_nst_pre
+ sfc_nst
+ sfc_nst_post
+ lsm_noah
+ sfc_sice
+ GFS_surface_loop_control_part2
+
+
+
+ GFS_surface_composites_post
+ sfc_diag
+ sfc_diag_post
+ GFS_surface_generic_post
+ mynnedmf_wrapper
+ GFS_GWD_generic_pre
+ ugwpv1_gsldrag
+ ugwpv1_gsldrag_post
+ GFS_GWD_generic_post
+ GFS_suite_stateout_update
+ ozphys_2015
+ h2ophys
+ get_phi_fv3
+ GFS_suite_interstitial_3
+ GFS_DCNV_generic_pre
+ cu_gf_driver_pre
+ cu_gf_driver
+ GFS_DCNV_generic_post
+ GFS_suite_interstitial_4
+ cnvc90
+ GFS_MP_generic_pre
+ mp_thompson_pre
+
+
+ mp_thompson
+
+
+ mp_thompson_post
+ GFS_MP_generic_post
+ cu_gf_driver_post
+ maximum_hourly_diagnostics
+ phys_tend
+
+
+
+
+ GFS_stochastics
+
+
+
+
diff --git a/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_unified_ugwp.xml b/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_unified_ugwp.xml
new file mode 100644
index 000000000..f639f233f
--- /dev/null
+++ b/ccpp/suites/suite_FV3_RAP_noah_sfcdiff_unified_ugwp.xml
@@ -0,0 +1,90 @@
+
+
+
+
+
+
+ GFS_time_vary_pre
+ GFS_rrtmg_setup
+ GFS_rad_time_vary
+ GFS_phys_time_vary
+
+
+
+
+ GFS_suite_interstitial_rad_reset
+ sgscloud_radpre
+ GFS_rrtmg_pre
+ GFS_radiation_surface
+ rrtmg_sw_pre
+ rrtmg_sw
+ rrtmg_sw_post
+ rrtmg_lw_pre
+ rrtmg_lw
+ sgscloud_radpost
+ rrtmg_lw_post
+ GFS_rrtmg_post
+
+
+
+
+ GFS_suite_interstitial_phys_reset
+ GFS_suite_stateout_reset
+ get_prs_fv3
+ GFS_suite_interstitial_1
+ GFS_surface_generic_pre
+ GFS_surface_composites_pre
+ dcyc2t3
+ GFS_surface_composites_inter
+ GFS_suite_interstitial_2
+
+
+
+ sfc_diff
+ GFS_surface_loop_control_part1
+ sfc_nst_pre
+ sfc_nst
+ sfc_nst_post
+ lsm_noah
+ sfc_sice
+ GFS_surface_loop_control_part2
+
+
+
+ GFS_surface_composites_post
+ sfc_diag
+ sfc_diag_post
+ GFS_surface_generic_post
+ mynnedmf_wrapper
+ GFS_GWD_generic_pre
+ unified_ugwp
+ unified_ugwp_post
+ GFS_GWD_generic_post
+ GFS_suite_stateout_update
+ ozphys_2015
+ h2ophys
+ get_phi_fv3
+ GFS_suite_interstitial_3
+ GFS_DCNV_generic_pre
+ cu_gf_driver_pre
+ cu_gf_driver
+ GFS_DCNV_generic_post
+ GFS_suite_interstitial_4
+ cnvc90
+ GFS_MP_generic_pre
+ mp_thompson_pre
+ mp_thompson
+ mp_thompson_post
+ GFS_MP_generic_post
+ cu_gf_driver_post
+ maximum_hourly_diagnostics
+ phys_tend
+
+
+
+
+ GFS_stochastics
+
+
+
+
diff --git a/ccpp/suites/suite_FV3_RRFS_v1alpha.xml b/ccpp/suites/suite_FV3_RRFS_v1alpha.xml
new file mode 100644
index 000000000..b3622828e
--- /dev/null
+++ b/ccpp/suites/suite_FV3_RRFS_v1alpha.xml
@@ -0,0 +1,84 @@
+
+
+
+
+
+
+ GFS_time_vary_pre
+ GFS_rrtmg_setup
+ GFS_rad_time_vary
+ GFS_phys_time_vary
+
+
+
+
+ GFS_suite_interstitial_rad_reset
+ sgscloud_radpre
+ GFS_rrtmg_pre
+ GFS_radiation_surface
+ rrtmg_sw_pre
+ rrtmg_sw
+ rrtmg_sw_post
+ rrtmg_lw_pre
+ rrtmg_lw
+ sgscloud_radpost
+ rrtmg_lw_post
+ GFS_rrtmg_post
+
+
+
+
+ GFS_suite_interstitial_phys_reset
+ GFS_suite_stateout_reset
+ get_prs_fv3
+ GFS_suite_interstitial_1
+ GFS_surface_generic_pre
+ GFS_surface_composites_pre
+ dcyc2t3
+ GFS_surface_composites_inter
+ GFS_suite_interstitial_2
+
+
+
+ sfc_diff
+ GFS_surface_loop_control_part1
+ sfc_nst_pre
+ sfc_nst
+ sfc_nst_post
+ noahmpdrv
+ sfc_sice
+ GFS_surface_loop_control_part2
+
+
+
+ GFS_surface_composites_post
+ sfc_diag
+ sfc_diag_post
+ GFS_surface_generic_post
+ mynnedmf_wrapper
+ GFS_GWD_generic_pre
+ cires_ugwp
+ cires_ugwp_post
+ GFS_GWD_generic_post
+ GFS_suite_stateout_update
+ ozphys_2015
+ h2ophys
+ get_phi_fv3
+ GFS_suite_interstitial_3
+ GFS_suite_interstitial_4
+ GFS_MP_generic_pre
+ mp_thompson_pre
+ mp_thompson
+ mp_thompson_post
+ GFS_MP_generic_post
+ maximum_hourly_diagnostics
+ phys_tend
+
+
+
+
+ GFS_stochastics
+
+
+
+
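[Editor's note: the XML markup of the new and modified CCPP suite definition files above appears to have been stripped in this rendering, leaving only the scheme names and blank placeholder lines. For orientation only, a CCPP suite definition file wraps each scheme list in suite/group/subcycle elements roughly as sketched below; the suite name is taken from the filename, while the version attribute and group layout are illustrative assumptions, not the exact contents of these files:

    <?xml version="1.0" encoding="UTF-8"?>
    <suite name="FV3_RRFS_v1alpha" version="1">
      <group name="radiation">
        <subcycle loop="1">
          <scheme>GFS_rrtmg_pre</scheme>
          <scheme>rrtmg_sw</scheme>
          <scheme>GFS_rrtmg_post</scheme>
        </subcycle>
      </group>
      <!-- further <group> blocks (physics, stochastics, ...) follow the same pattern -->
    </suite>
]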
diff --git a/cpl/module_block_data.F90 b/cpl/module_block_data.F90
index 02ef0ebc8..9d2cc9192 100644
--- a/cpl/module_block_data.F90
+++ b/cpl/module_block_data.F90
@@ -71,6 +71,7 @@ subroutine block_copy_1d_i4_to_2d_r8(destin_ptr, source_ptr, block, block_index,
if (associated(destin_ptr) .and. associated(source_ptr)) then
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -105,6 +106,7 @@ subroutine block_copy_1d_to_2d_r8(destin_ptr, source_ptr, block, block_index, sc
if (associated(destin_ptr) .and. associated(source_ptr)) then
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -144,6 +146,7 @@ subroutine block_copy_1dslice_to_2d_r8(destin_ptr, source_ptr, slice, block, blo
if (slice > 0 .and. slice <= size(source_ptr, dim=2)) then
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -182,6 +185,7 @@ subroutine block_copy_2d_to_3d_r8(destin_ptr, source_ptr, block, block_index, sc
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
do k = 1, size(source_ptr, dim=2)
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -219,6 +223,7 @@ subroutine block_copy_2d_to_2d_r8(destin_ptr, source_ptr, block, block_index, sc
if (associated(destin_ptr) .and. associated(source_ptr)) then
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -253,6 +258,7 @@ subroutine block_array_copy_2d_to_2d_r8(destin_ptr, source_arr, block, block_ind
if (associated(destin_ptr)) then
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -290,6 +296,7 @@ subroutine block_copy_3d_to_3d_r8(destin_ptr, source_ptr, block, block_index, sc
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
do k = 1, size(source_ptr, dim=3)
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -326,6 +333,7 @@ subroutine block_array_copy_3d_to_3d_r8(destin_ptr, source_arr, block, block_ind
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
do k = 1, size(source_arr, dim=3)
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -367,6 +375,7 @@ subroutine block_copy_3dslice_to_3d_r8(destin_ptr, source_ptr, slice, block, blo
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
do k = 1, size(source_ptr, dim=3)
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -407,6 +416,7 @@ subroutine block_array_copy_3dslice_to_3d_r8(destin_ptr, source_arr, slice, bloc
factor = 1._kind_phys
if (present(scale_factor)) factor = scale_factor
do k = 1, size(source_arr, dim=3)
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -441,6 +451,7 @@ subroutine block_fill_2d_r8(destin_ptr, fill_value, block, block_index, rc)
! -- begin
localrc = ESMF_RC_PTR_NOTALLOC
if (associated(destin_ptr)) then
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -474,6 +485,7 @@ subroutine block_fill_3d_r8(destin_ptr, fill_value, block, block_index, rc)
localrc = ESMF_RC_PTR_NOTALLOC
if (associated(destin_ptr)) then
do k = 1, size(destin_ptr, dim=3)
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
@@ -586,6 +598,7 @@ subroutine block_combine_frac_1d_to_2d_r8(destin_ptr, fract1_ptr, fract2_ptr, bl
localrc = ESMF_RC_PTR_NOTALLOC
if (associated(destin_ptr) .and. &
associated(fract1_ptr) .and. associated(fract2_ptr)) then
+!$omp parallel do private(ix,ib,jb,i,j)
do ix = 1, block%blksz(block_index)
ib = block%index(block_index)%ii(ix)
jb = block%index(block_index)%jj(ix)
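[Editor's note: every addition to module_block_data.F90 above follows the same pattern, an OpenMP parallel do directive placed on the innermost block-copy loop with the loop and index variables declared private. The loop is safe to parallelize because each ix maps to a distinct (i,j) destination point of the block decomposition. A self-contained sketch of the pattern, with assumed names and a simplified copy body rather than the fv3atm derived types:

    subroutine copy_block(dest, src, ii, jj, n, factor)
      implicit none
      real(8), intent(inout) :: dest(:,:)   ! destination 2-d field
      real(8), intent(in)    :: src(:)      ! blocked 1-d source data
      integer, intent(in)    :: ii(:), jj(:), n
      real(8), intent(in)    :: factor
      integer :: ix, i, j
    !$omp parallel do private(ix,i,j)
      do ix = 1, n
        i = ii(ix)                  ! block-to-grid index maps, as in module_block_data
        j = jj(ix)
        dest(i,j) = factor * src(ix)  ! each ix writes a distinct (i,j), so no data race
      end do
    end subroutine copy_block
]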
diff --git a/cpl/module_cap_cpl.F90 b/cpl/module_cap_cpl.F90
index 47f48ce4d..d69f6c989 100644
--- a/cpl/module_cap_cpl.F90
+++ b/cpl/module_cap_cpl.F90
@@ -6,253 +6,15 @@ module module_cap_cpl
! 12 Mar 2018: J. Wang Pull coupled subroutines from fv3_cap.F90 to this module
!
use ESMF
- use NUOPC
- use module_cplfields, only : FieldInfo
-!
implicit none
+
private
- public clock_cplIntval
- ! public realizeConnectedInternCplField
- public realizeConnectedCplFields
public diagnose_cplFields
!
contains
!-----------------------------------------------------------------------------
- !-----------------------------------------------------------------------------
-
- subroutine clock_cplIntval(gcomp, CF)
-
- type(ESMF_GridComp) :: gcomp
- type(ESMF_Config) :: CF
-!
- real(ESMF_KIND_R8) :: medAtmCouplingIntervalSec
- type(ESMF_Clock) :: fv3Clock
- type(ESMF_TimeInterval) :: fv3Step
- integer :: rc
-!
- call ESMF_ConfigGetAttribute(config=CF, value=medAtmCouplingIntervalSec, &
- label="atm_coupling_interval_sec:", default=-1.0_ESMF_KIND_R8, rc=RC)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) &
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
-
- if (medAtmCouplingIntervalSec > 0._ESMF_KIND_R8) then ! The coupling time step is provided
- call ESMF_TimeIntervalSet(fv3Step, s_r8=medAtmCouplingIntervalSec, rc=RC)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) &
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
- call ESMF_GridCompGet(gcomp, clock=fv3Clock, rc=RC)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) &
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
- call ESMF_ClockSet(fv3Clock, timestep=fv3Step, rc=RC)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) &
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
- endif
-
- end subroutine clock_cplIntval
-
- !-----------------------------------------------------------------------------
-
- subroutine addFieldMetadata(field, key, values, rc)
-
- ! This subroutine implements a preliminary method to provide metadata to
- ! a coupled model that is accessing the field via reference sharing
- ! (NUOPC SharedStatusField=.true.). The method sets a (key, values) pair
- ! in the field's array ESMF_Info object to retrieve an array of strings
- ! encoding metadata.
- !
- ! Such a capability should be implemented in the standard NUOPC connector
- ! for more general applications, possibly providing access to the field's
- ! ESMF_Info object.
-
- type(ESMF_Field) :: field
- character(len=*), intent(in) :: key
- character(len=*), intent(in) :: values(:)
- integer, optional, intent(out) :: rc
-
- ! local variable
- integer :: localrc
- type(ESMF_Array) :: array
- type(ESMF_Info) :: info
-
- ! begin
- if (present(rc)) rc = ESMF_SUCCESS
-
- call ESMF_FieldGet(field, array=array, rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
- call ESMF_InfoGetFromHost(array, info, rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
- call ESMF_InfoSet(info, key, values, rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
-
- end subroutine addFieldMetadata
-
- !-----------------------------------------------------------------------------
-
-#if 0
- subroutine realizeConnectedInternCplField(state, field, standardName, grid, rc)
-
- type(ESMF_State) :: state
- type(ESMF_Field), optional :: field
- character(len=*), optional :: standardName
- type(ESMF_Grid), optional :: grid
- integer, intent(out), optional :: rc
-
- ! local variables
- character(len=80) :: fieldName
- type(ESMF_ArraySpec) :: arrayspec
- integer :: i, localrc
- logical :: isConnected
- real(ESMF_KIND_R8), pointer :: fptr(:,:)
-
- if (present(rc)) rc = ESMF_SUCCESS
-
- fieldName = standardName ! use standard name as field name
-
- !! Create fields using wam2dmesh if they are WAM fields
- isConnected = NUOPC_IsConnected(state, fieldName=fieldName, rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
-
- if (isConnected) then
-
- field = ESMF_FieldCreate(grid, ESMF_TYPEKIND_R8, name=fieldName, rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
- call NUOPC_Realize(state, field=field, rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
-
- call ESMF_FieldGet(field, farrayPtr=fptr, rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
-
- fptr=0._ESMF_KIND_R8 ! zero out the entire field
- call NUOPC_SetAttribute(field, name="Updated", value="true", rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
-
- else
- ! remove a not connected Field from State
- call ESMF_StateRemove(state, (/fieldName/), rc=localrc)
- if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
- endif
-
- end subroutine realizeConnectedInternCplField
-#endif
- !-----------------------------------------------------------------------------
-
- subroutine realizeConnectedCplFields(state, grid, &
- numLevels, numSoilLayers, numTracers, &
- fields_info, state_tag, fieldList, fill_value, rc)
-
- use field_manager_mod, only: MODEL_ATMOS
- use tracer_manager_mod, only: get_number_tracers, get_tracer_names
-
- type(ESMF_State), intent(inout) :: state
- type(ESMF_Grid), intent(in) :: grid
- integer, intent(in) :: numLevels
- integer, intent(in) :: numSoilLayers
- integer, intent(in) :: numTracers
- type(FieldInfo), dimension(:), intent(in) :: fields_info
- character(len=*), intent(in) :: state_tag !< Import or export.
- type(ESMF_Field), dimension(:), intent(out) :: fieldList
- real(ESMF_KIND_R8), optional , intent(in) :: fill_value
- integer, intent(out) :: rc
-
- ! local variables
-
- integer :: item, pos, tracerCount
- logical :: isConnected
- type(ESMF_Field) :: field
- real(ESMF_KIND_R8) :: l_fill_value
- real(ESMF_KIND_R8), parameter :: d_fill_value = 0._ESMF_KIND_R8
- type(ESMF_StateIntent_Flag) :: stateintent
- character(len=32), allocatable, dimension(:) :: tracerNames, tracerUnits
-
- ! begin
- rc = ESMF_SUCCESS
-
- if (present(fill_value)) then
- l_fill_value = fill_value
- else
- l_fill_value = d_fill_value
- end if
-
- ! attach list of tracer names to exported tracer field as metadata
- call ESMF_StateGet(state, stateintent=stateintent, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- if (stateintent == ESMF_STATEINTENT_EXPORT) then
- call get_number_tracers(MODEL_ATMOS, num_tracers=tracerCount)
- allocate(tracerNames(tracerCount), tracerUnits(tracerCount))
- do item = 1, tracerCount
- call get_tracer_names(MODEL_ATMOS, item, tracerNames(item), units=tracerUnits(item))
- end do
- end if
-
- do item = 1, size(fields_info)
- isConnected = NUOPC_IsConnected(state, fieldName=trim(fields_info(item)%name), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if (isConnected) then
- call ESMF_StateGet(state, field=field, itemName=trim(fields_info(item)%name), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_FieldEmptySet(field, grid=grid, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- select case (fields_info(item)%type)
- case ('l','layer')
- call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, &
- ungriddedLBound=(/1/), ungriddedUBound=(/numLevels/), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- case ('i','interface')
- call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, &
- ungriddedLBound=(/1/), ungriddedUBound=(/numLevels+1/), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- case ('t','tracer')
- call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, &
- ungriddedLBound=(/1, 1/), ungriddedUBound=(/numLevels, numTracers/), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if (allocated(tracerNames)) then
- call addFieldMetadata(field, 'tracerNames', tracerNames, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- end if
- if (allocated(tracerUnits)) then
- call addFieldMetadata(field, 'tracerUnits', tracerUnits, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- end if
- case ('s','surface')
- call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- case ('g','soil')
- call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, &
- ungriddedLBound=(/1/), ungriddedUBound=(/numSoilLayers/), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- case default
- call ESMF_LogSetError(ESMF_RC_NOT_VALID, &
- msg="exportFieldType = '"//trim(fields_info(item)%type)//"' not recognized", &
- line=__LINE__, file=__FILE__, rcToReturn=rc)
- return
- end select
- call NUOPC_Realize(state, field=field, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! -- initialize field value
- call ESMF_FieldFill(field, dataFillScheme="const", const1=l_fill_value, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! -- save field
- fieldList(item) = field
- call ESMF_LogWrite('realizeConnectedCplFields '//trim(state_tag)//' Field '//trim(fields_info(item)%name) &
- // ' is connected ', ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=rc)
- else
- ! remove a not connected Field from State
- call ESMF_StateRemove(state, (/trim(fields_info(item)%name)/), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_LogWrite('realizeConnectedCplFields '//trim(state_tag)//' Field '//trim(fields_info(item)%name) &
- // ' is not connected ', ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=rc)
- end if
- end do
-
- if (allocated(tracerNames)) deallocate(tracerNames)
- if (allocated(tracerUnits)) deallocate(tracerUnits)
-
- end subroutine realizeConnectedCplFields
-
!-----------------------------------------------------------------------------
subroutine diagnose_cplFields(gcomp, clock_fv3, fcstpe, &
@@ -316,103 +78,6 @@ subroutine diagnose_cplFields(gcomp, clock_fv3, fcstpe, &
end subroutine diagnose_cplFields
- !-----------------------------------------------------------------------------
-
- subroutine ESMFPP_RegridWriteState(state, fileName, timeslice, rc)
-
- type(ESMF_State), intent(in) :: state
- character(len=*), intent(in) :: fileName
- integer, intent(in) :: timeslice
- integer, intent(out) :: rc
-
- ! local
- type(ESMF_Field) :: field
- type(ESMF_Grid) :: outGrid
- integer :: i, icount
- character(64), allocatable :: itemNameList(:)
- type(ESMF_StateItem_Flag), allocatable :: typeList(:)
-
- rc = ESMF_SUCCESS
-
- ! 1degx1deg
- outGrid = ESMF_GridCreate1PeriDimUfrm(maxIndex=(/360,180/), &
- minCornerCoord=(/0.0_ESMF_KIND_R8,-90.0_ESMF_KIND_R8/), &
- maxCornerCoord=(/360.0_ESMF_KIND_R8,90.0_ESMF_KIND_R8/), &
- staggerLocList=(/ESMF_STAGGERLOC_CORNER, ESMF_STAGGERLOC_CENTER/), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_StateGet(state, itemCount=icount, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- allocate(typeList(icount), itemNameList(icount))
- call ESMF_StateGet(state, itemTypeList=typeList, itemNameList=itemNameList, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- do i = 1, icount
- if(typeList(i) == ESMF_STATEITEM_FIELD) then
- call ESMF_LogWrite("RegridWrite Field Name Initiated: "//trim(itemNameList(i)), ESMF_LOGMSG_INFO)
- call ESMF_StateGet(state, itemName=itemNameList(i), field=field, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMFPP_RegridWrite(field, outGrid, ESMF_REGRIDMETHOD_BILINEAR, &
- fileName//trim(itemNameList(i))//'.nc', trim(itemNameList(i)), timeslice, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_LogWrite("RegridWrite Field Name done: "//trim(itemNameList(i)), ESMF_LOGMSG_INFO)
- endif
- enddo
-
- deallocate(typeList, itemNameList)
-
- call ESMF_GridDestroy(outGrid,noGarbage=.true., rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- end subroutine ESMFPP_RegridWriteState
-
- subroutine ESMFPP_RegridWrite(inField, outGrid, regridMethod, fileName, fieldName, timeslice, rc)
-
- ! input arguments
- type(ESMF_Field), intent(in) :: inField
- type(ESMF_Grid), intent(in) :: outGrid
- type(ESMF_RegridMethod_Flag), intent(in) :: regridMethod
- character(len=*), intent(in) :: filename
- character(len=*), intent(in) :: fieldName
- integer, intent(in) :: timeslice
- integer, intent(inout) :: rc
-
- ! local variables
- integer :: srcTermProcessing
- type(ESMF_Routehandle) :: rh
- type(ESMF_Field) :: outField
-
- outField = ESMF_FieldCreate(outGrid, typekind=ESMF_TYPEKIND_R8, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! Perform entire regridding arithmetic on the destination PET
- srcTermProcessing = 0
- ! For other options for the regrid operation, please refer to:
- ! http://www.earthsystemmodeling.org/esmf_releases/last_built/ESMF_refdoc/node5.html#SECTION050366000000000000000
- call ESMF_FieldRegridStore(inField, outField, regridMethod=regridMethod, &
- unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, &
- srcTermProcessing=srcTermProcessing, Routehandle=rh, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! Use fixed ascending order for the sum terms based on their source
- ! sequence index to ensure bit-for-bit reproducibility
- call ESMF_FieldRegrid(inField, outField, Routehandle=rh, &
- termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_FieldWrite(outField, fileName, variableName=fieldName, timeslice=timeslice, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_FieldRegridRelease(routehandle=rh, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_FieldDestroy(outField,noGarbage=.true., rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- rc = ESMF_SUCCESS
-
- end subroutine ESMFPP_RegridWrite
-
!-----------------------------------------------------------------------------
! This subroutine requires ESMFv8 - for coupled FV3
diff --git a/cpl/module_cplfields.F90 b/cpl/module_cplfields.F90
index 380c49c77..68d6f10d8 100644
--- a/cpl/module_cplfields.F90
+++ b/cpl/module_cplfields.F90
@@ -6,6 +6,7 @@ module module_cplfields
!-----------------------------------------------------------------------------
use ESMF
+ use NUOPC
implicit none
@@ -224,6 +225,7 @@ module module_cplfields
! Methods
public queryImportFields, queryExportFields
public cplFieldGet
+ public realizeConnectedCplFields
!-----------------------------------------------------------------------------
contains
@@ -237,6 +239,8 @@ integer function queryExportFields(fieldname, abortflag)
end function queryExportFields
+!-----------------------------------------------------------------------------
+
integer function queryImportFields(fieldname, abortflag)
character(len=*),intent(in) :: fieldname
@@ -246,6 +250,7 @@ integer function queryImportFields(fieldname, abortflag)
end function queryImportFields
+!-----------------------------------------------------------------------------
integer function queryFieldList(fieldsInfo, fieldname, abortflag)
! returns integer index of first found fieldname in fieldlist
@@ -282,9 +287,9 @@ integer function queryFieldList(fieldsInfo, fieldname, abortflag)
CALL ESMF_Finalize(endflag=ESMF_END_ABORT)
endif
end function queryFieldList
-!
-!------------------------------------------------------------------------------
-!
+
+!-----------------------------------------------------------------------------
+
subroutine cplStateGet(state, fieldList, fieldCount, rc)
character(len=*), intent(in) :: state
@@ -311,6 +316,7 @@ subroutine cplStateGet(state, fieldList, fieldCount, rc)
end subroutine cplStateGet
+!-----------------------------------------------------------------------------
subroutine cplFieldGet(state, name, localDe, &
farrayPtr2d, farrayPtr3d, farrayPtr4d, rc)
@@ -379,6 +385,159 @@ subroutine cplFieldGet(state, name, localDe, &
end do
end subroutine cplFieldGet
+
+
+ subroutine realizeConnectedCplFields(state, grid, &
+ numLevels, numSoilLayers, numTracers, &
+ fields_info, state_tag, fieldList, fill_value, rc)
+
+ use field_manager_mod, only: MODEL_ATMOS
+ use tracer_manager_mod, only: get_number_tracers, get_tracer_names
+
+ type(ESMF_State), intent(inout) :: state
+ type(ESMF_Grid), intent(in) :: grid
+ integer, intent(in) :: numLevels
+ integer, intent(in) :: numSoilLayers
+ integer, intent(in) :: numTracers
+ type(FieldInfo), dimension(:), intent(in) :: fields_info
+ character(len=*), intent(in) :: state_tag !< Import or export.
+ type(ESMF_Field), dimension(:), intent(out) :: fieldList
+ real(ESMF_KIND_R8), optional , intent(in) :: fill_value
+ integer, intent(out) :: rc
+
+ ! local variables
+
+ integer :: item, pos, tracerCount
+ logical :: isConnected
+ type(ESMF_Field) :: field
+ real(ESMF_KIND_R8) :: l_fill_value
+ real(ESMF_KIND_R8), parameter :: d_fill_value = 0._ESMF_KIND_R8
+ type(ESMF_StateIntent_Flag) :: stateintent
+ character(len=32), allocatable, dimension(:) :: tracerNames, tracerUnits
+
+ ! begin
+ rc = ESMF_SUCCESS
+
+ if (present(fill_value)) then
+ l_fill_value = fill_value
+ else
+ l_fill_value = d_fill_value
+ end if
+
+ ! attach list of tracer names to exported tracer field as metadata
+ call ESMF_StateGet(state, stateintent=stateintent, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ if (stateintent == ESMF_STATEINTENT_EXPORT) then
+ call get_number_tracers(MODEL_ATMOS, num_tracers=tracerCount)
+ allocate(tracerNames(tracerCount), tracerUnits(tracerCount))
+ do item = 1, tracerCount
+ call get_tracer_names(MODEL_ATMOS, item, tracerNames(item), units=tracerUnits(item))
+ end do
+ end if
+
+ do item = 1, size(fields_info)
+ isConnected = NUOPC_IsConnected(state, fieldName=trim(fields_info(item)%name), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (isConnected) then
+ call ESMF_StateGet(state, field=field, itemName=trim(fields_info(item)%name), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_FieldEmptySet(field, grid=grid, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ select case (fields_info(item)%type)
+ case ('l','layer')
+ call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, &
+ ungriddedLBound=(/1/), ungriddedUBound=(/numLevels/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ case ('i','interface')
+ call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, &
+ ungriddedLBound=(/1/), ungriddedUBound=(/numLevels+1/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ case ('t','tracer')
+ call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, &
+ ungriddedLBound=(/1, 1/), ungriddedUBound=(/numLevels, numTracers/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (allocated(tracerNames)) then
+ call addFieldMetadata(field, 'tracerNames', tracerNames, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ end if
+ if (allocated(tracerUnits)) then
+ call addFieldMetadata(field, 'tracerUnits', tracerUnits, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ end if
+ case ('s','surface')
+ call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ case ('g','soil')
+ call ESMF_FieldEmptyComplete(field, typekind=ESMF_TYPEKIND_R8, &
+ ungriddedLBound=(/1/), ungriddedUBound=(/numSoilLayers/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ case default
+ call ESMF_LogSetError(ESMF_RC_NOT_VALID, &
+ msg="exportFieldType = '"//trim(fields_info(item)%type)//"' not recognized", &
+ line=__LINE__, file=__FILE__, rcToReturn=rc)
+ return
+ end select
+ call NUOPC_Realize(state, field=field, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ ! -- initialize field value
+ call ESMF_FieldFill(field, dataFillScheme="const", const1=l_fill_value, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ ! -- save field
+ fieldList(item) = field
+ call ESMF_LogWrite('realizeConnectedCplFields '//trim(state_tag)//' Field '//trim(fields_info(item)%name) &
+ // ' is connected ', ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=rc)
+ else
+ ! remove a not connected Field from State
+ call ESMF_StateRemove(state, (/trim(fields_info(item)%name)/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_LogWrite('realizeConnectedCplFields '//trim(state_tag)//' Field '//trim(fields_info(item)%name) &
+ // ' is not connected ', ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=rc)
+ end if
+ end do
+
+ if (allocated(tracerNames)) deallocate(tracerNames)
+ if (allocated(tracerUnits)) deallocate(tracerUnits)
+
+ end subroutine realizeConnectedCplFields
+
+!-----------------------------------------------------------------------------
+
+ subroutine addFieldMetadata(field, key, values, rc)
+
+ ! This subroutine implements a preliminary method to provide metadata to
+ ! a coupled model that is accessing the field via reference sharing
+ ! (NUOPC SharedStatusField=.true.). The method sets a (key, values) pair
+ ! in the field's array ESMF_Info object to retrieve an array of strings
+ ! encoding metadata.
+ !
+ ! Such a capability should be implemented in the standard NUOPC connector
+ ! for more general applications, possibly providing access to the field's
+ ! ESMF_Info object.
+
+ type(ESMF_Field) :: field
+ character(len=*), intent(in) :: key
+ character(len=*), intent(in) :: values(:)
+ integer, optional, intent(out) :: rc
+
+ ! local variable
+ integer :: localrc
+ type(ESMF_Array) :: array
+ type(ESMF_Info) :: info
+
+ ! begin
+ if (present(rc)) rc = ESMF_SUCCESS
+
+ call ESMF_FieldGet(field, array=array, rc=localrc)
+ if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
+ call ESMF_InfoGetFromHost(array, info, rc=localrc)
+ if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
+ call ESMF_InfoSet(info, key, values, rc=localrc)
+ if (ESMF_LogFoundError(rcToCheck=localrc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
+
+ end subroutine addFieldMetadata
!
!------------------------------------------------------------------------------
!
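[Editor's note: realizeConnectedCplFields and addFieldMetadata now live in module_cplfields (above) rather than module_cap_cpl. A brief, hedged usage sketch of the metadata helper, assuming a field handle named tracerField that has already been created and realized; both the handle and the tracer list here are illustrative, not taken from this change:

    ! attach a list of tracer names to the field's Array-level ESMF_Info object
    type(ESMF_Field) :: tracerField
    integer          :: rc
    character(len=32), allocatable :: names(:)
    names = [ character(len=32) :: 'sphum', 'liq_wat', 'o3mr' ]
    call addFieldMetadata(tracerField, 'tracerNames', names, rc=rc)
    ! a component sharing the field by reference can read the list back
    ! from the Array's ESMF_Info object under the key 'tracerNames'
]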
diff --git a/fv3_cap.F90 b/fv3_cap.F90
index a256fbdf6..87dbe0e69 100644
--- a/fv3_cap.F90
+++ b/fv3_cap.F90
@@ -16,21 +16,20 @@ module fv3gfs_cap_mod
use ESMF
use NUOPC
use NUOPC_Model, only: model_routine_SS => SetServices, &
+ SetVM, &
routine_Run, &
label_Advertise, &
label_RealizeProvided, &
label_Advance, &
label_CheckImport, &
+ label_SetRunClock, &
label_TimestampExport, &
label_Finalize, &
NUOPC_ModelGet
!
use module_fv3_config, only: quilting, output_fh, &
nfhout, nfhout_hf, nsout, dt_atmos, &
- nfhmax, nfhmax_hf,output_hfmax, &
- output_interval,output_interval_hf, &
- calendar, calendar_type, &
- force_date_from_configure, &
+ calendar, &
cplprint_flag,output_1st_tstep_rst, &
first_kdt
@@ -38,12 +37,11 @@ module fv3gfs_cap_mod
num_files, filename_base, &
wrttasks_per_group, n_group, &
lead_wrttask, last_wrttask, &
- output_grid, output_file, &
nsout_io, iau_offset, lflname_fulltime
!
use module_fcst_grid_comp, only: fcstSS => SetServices, &
fcstGrid, numLevels, numSoilLayers, &
- numTracers
+ numTracers, mygrid, grid_number_on_all_pets
use module_wrt_grid_comp, only: wrtSS => SetServices
!
@@ -51,8 +49,8 @@ module fv3gfs_cap_mod
nImportFields, importFields, importFieldsInfo, &
importFieldsValid, queryImportFields
- use module_cap_cpl, only: realizeConnectedCplFields, &
- clock_cplIntval, diagnose_cplFields
+ use module_cplfields, only: realizeConnectedCplFields
+ use module_cap_cpl, only: diagnose_cplFields
use atmos_model_mod, only: setup_exportdata
@@ -62,7 +60,6 @@ module fv3gfs_cap_mod
!
!-----------------------------------------------------------------------
!
- type(ESMF_Clock),save :: clock_fv3
type(ESMF_GridComp) :: fcstComp
type(ESMF_State) :: fcstState
@@ -138,6 +135,14 @@ subroutine SetServices(gcomp, rc)
specPhaseLabel="phase2", specRoutine=ModelAdvance_phase2, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ ! specializations to set fv3 cap run clock (model clock)
+ call ESMF_MethodRemove(gcomp, label=label_SetRunClock, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call NUOPC_CompSpecialize(gcomp, specLabel=label_SetRunClock, &
+ specRoutine=ModelSetRunClock, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
! specializations required to support 'inline' run sequences
call NUOPC_CompSpecialize(gcomp, specLabel=label_CheckImport, &
specPhaseLabel="phase1", specRoutine=fv3_checkimport, rc=rc)
@@ -173,40 +178,45 @@ subroutine InitializeAdvertise(gcomp, rc)
character(240) :: msgString
logical :: isPresent, isSet
type(ESMF_VM) :: vm, fcstVM
- type(ESMF_Time) :: currTime, startTime, stopTime
- type(ESMF_TimeInterval) :: RunDuration, timeStep, rsthour, IAU_offsetTI
+ type(ESMF_Time) :: currTime, startTime
+ type(ESMF_TimeInterval) :: timeStep, rsthour
type(ESMF_Config) :: cf
type(ESMF_RegridMethod_Flag) :: regridmethod
- type(ESMF_TimeInterval) :: earthStep
- integer(ESMF_KIND_I4) :: nhf, nrg
- integer,dimension(6) :: date, date_init
- integer :: i, j, k, io_unit, urc, ierr, ist
+ integer :: i, j, k, urc, ist
integer :: noutput_fh, nfh, nfh2
integer :: petcount
- integer :: num_output_file
+ integer :: nfhmax_hf
+ real :: nfhmax
real :: output_startfh, outputfh, outputfh2(2)
- logical :: opened, loutput_fh, lfreq
+ logical :: loutput_fh, lfreq
character(ESMF_MAXSTR) :: name
integer,dimension(:), allocatable :: petList, fcstPetList, originPetList, targetPetList
character(len=esmf_maxstr),allocatable :: fcstItemNameList(:)
type(ESMF_StateItem_Flag), allocatable :: fcstItemTypeList(:)
character(20) :: cwrtcomp
integer :: isrcTermProcessing
+ type(ESMF_Info) :: parentInfo, childInfo
character(len=*),parameter :: subname='(fv3_cap:InitializeAdvertise)'
- real(kind=8) :: MPI_Wtime, timewri, timeis, timerhs
+ real(kind=8) :: MPI_Wtime, timeis, timerhs
!
!------------------------------------------------------------------------
!
rc = ESMF_SUCCESS
timeis = MPI_Wtime()
+ call ESMF_GridCompGet(gcomp,name=name,vm=vm,rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_VMGet(vm, petCount=petcount, localpet=mype, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
! query for importState and exportState
call NUOPC_ModelGet(gcomp, driverClock=clock, importState=importState, exportState=exportState, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_AttributeGet(gcomp, name="ProfileMemory", value=value, defaultValue="true", &
+ call ESMF_AttributeGet(gcomp, name="ProfileMemory", value=value, defaultValue="false", &
convention="NUOPC", purpose="Instance", rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
profile_memory = (trim(value)/="false")
@@ -228,18 +238,6 @@ subroutine InitializeAdvertise(gcomp, rc)
write(msgString,'(A,i6)') trim(subname)//' dbug = ',dbug
call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO, rc=rc)
- call ESMF_GridCompGet(gcomp,name=name,vm=vm,rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_VMGet(vm, petCount=petcount, localpet=mype, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
-! print *,'in fv3_cap,initAdvertize,name=',trim(name),'petcount=',petcount,'mype=',mype
-!
-! create an instance clock for fv3
- clock_fv3 = ESMF_ClockCreate(clock, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!
!------------------------------------------------------------------------
! get config variables
!
@@ -265,8 +263,8 @@ subroutine InitializeAdvertise(gcomp, rc)
noutput_fh = ESMF_ConfigGetLen(config=CF, label ='output_fh:',rc=rc)
- if(mype == 0) print *,'af nems config,quilting=',quilting,'calendar=', trim(calendar),' iau_offset=',iau_offset, &
- 'noutput_fh=',noutput_fh
+ if(mype == 0) print *,'af nems config,quilting=',quilting,' calendar=', trim(calendar),' iau_offset=',iau_offset, &
+ ' noutput_fh=',noutput_fh
!
nfhout = 0 ; nfhmax_hf = 0 ; nfhout_hf = 0 ; nsout = 0
if ( quilting ) then
@@ -282,9 +280,8 @@ subroutine InitializeAdvertise(gcomp, rc)
label ='isrcTermProcessing:',rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if(mype == 0) print *,'af nems config,quilting=',quilting,'write_groups=', &
- write_groups,wrttasks_per_group,'calendar=',trim(calendar),'calendar_type=',calendar_type, &
- 'isrcTermProcessing=', isrcTermProcessing
+ if(mype == 0) print *,'af nems config,quilting=',quilting,' write_groups=', &
+ write_groups,wrttasks_per_group,' isrcTermProcessing=', isrcTermProcessing
!
call ESMF_ConfigGetAttribute(config=CF,value=num_files, &
label ='num_files:',rc=rc)
@@ -297,33 +294,6 @@ subroutine InitializeAdvertise(gcomp, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
enddo
- allocate(output_file(num_files))
- num_output_file = ESMF_ConfigGetLen(config=CF, label ='output_file:',rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if (num_files == num_output_file) then
- call ESMF_ConfigGetAttribute(CF,valueList=output_file,label='output_file:', &
- count=num_files, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- do i = 1, num_files
- if(output_file(i) /= "netcdf" .and. output_file(i) /= "netcdf_parallel") then
- write(0,*)"fv3_cap.F90: only netcdf and netcdf_parallel are allowed for multiple values of output_file"
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
- endif
- enddo
- else if ( num_output_file == 1) then
- call ESMF_ConfigGetAttribute(CF,valuelist=output_file,label='output_file:', count=1, rc=rc)
- output_file(1:num_files) = output_file(1)
- else
- output_file(1:num_files) = 'netcdf'
- endif
- if(mype == 0) then
- print *,'af nems config,num_files=',num_files
- do i=1,num_files
- print *,'num_file=',i,'filename_base= ',trim(filename_base(i)),&
- ' output_file= ',trim(output_file(i))
- enddo
- endif
-!
! variables for output
call ESMF_ConfigGetAttribute(config=CF, value=nfhout, label ='nfhout:', default=-1,rc=rc)
call ESMF_ConfigGetAttribute(config=CF, value=nfhmax_hf,label ='nfhmax_hf:',default=-1,rc=rc)
@@ -338,75 +308,9 @@ subroutine InitializeAdvertise(gcomp, rc)
call ESMF_ConfigGetAttribute(config=CF, value=dt_atmos, label ='dt_atmos:', rc=rc)
call ESMF_ConfigGetAttribute(config=CF, value=nfhmax, label ='nhours_fcst:',rc=rc)
if(mype == 0) print *,'af nems config,dt_atmos=',dt_atmos,'nfhmax=',nfhmax
- call ESMF_TimeIntervalSet(timeStep,s=dt_atmos,rc=rc)
- call ESMF_ClockSet(clock_fv3,timeStep=timeStep, rc=rc)
-!
-!------------------------------------------------------------------------
-! may need to set currTime for restart
-!
- call ESMF_ClockGet(clock_fv3, currTime=currTime, StartTime=startTime, &
- RunDuration=RunDuration, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- stopTime = startTime + RunDuration
-! *** read restart time from restart file
- do i=751,899
- inquire(i, opened=opened)
- if(.not. opened)then
- io_unit = i
- exit
- endif
- enddo
-!
- date = 0 ; date_init = 0
- force_date_from_configure = .true.
-!
- open(unit=io_unit, file=trim('INPUT/coupler.res'),status="old",err=998 )
- read (io_unit,*,err=999) calendar_type
- read (io_unit,*) date_init
- read (io_unit,*) date
- close(io_unit)
- force_date_from_configure = .false.
-!
- if(date(1) == 0 .and. date_init(1) /= 0) date = date_init
- if(mype == 0) print *,'bf clock_fv3,date=',date,'date_init=',date_init
-
- call ESMF_VMbroadcast(vm, date, 6, 0)
- call ESMF_TimeSet(time=currTime,yy=date(1),mm=date(2),dd=date(3),h=date(4), &
- m=date(5),s=date(6),rc=rc)
+ call ESMF_TimeIntervalSet(timeStep, s=dt_atmos, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-999 continue
-998 continue
-! if(mype==0) print *,'final date =',date,'date_init=',date_init
-
-!reset currTime in clock
- call ESMF_ClockSet(clock_fv3, currTime=currTime, startTime=startTime, &
- stopTime=stopTime, timeStep=timeStep, rc=rc)
-
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- !
- !Under NUOPC, the EARTH driver clock is a separate instance from the
- ! - fv3 clock. However, the fv3 clock may have been reset from restart
- ! - therefore the EARTH driver clock must also be adjusted.
- ! - Affected: currTime, timeStep
- call ESMF_ClockGet(clock, timeStep=earthStep, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
-
- if (earthStep > (stopTime-currTime)) earthStep = stopTime - currTime
- call ESMF_ClockSet(clock, currTime=currTime, timeStep=earthStep, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
-
- ! Set fv3 component clock as copy of EARTH clock.
- call NUOPC_CompSetClock(gcomp, clock, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
-
- ! Read in the FV3 coupling interval
- call clock_cplIntval(gcomp, CF)
first_kdt = 1
if( output_1st_tstep_rst) then
@@ -436,6 +340,18 @@ subroutine InitializeAdvertise(gcomp, rc)
fcstComp = ESMF_GridCompCreate(petList=fcstPetList, name='fv3_fcst', rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
+ ! copy attributes from fv3cap component to fcstComp
+ call ESMF_InfoGetFromHost(gcomp, info=parentInfo, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_InfoGetFromHost(fcstComp, info=childInfo, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_InfoUpdate(lhs=childInfo, rhs=parentInfo, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ ! use the generic SetVM method to do resource and threading control
+ call ESMF_GridCompSetVM(fcstComp, SetVM, userRc=urc, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
call ESMF_GridCompSetServices(fcstComp, fcstSS, userRc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
@@ -449,12 +365,12 @@ subroutine InitializeAdvertise(gcomp, rc)
! call fcst Initialize (including creating fcstgrid and fcst fieldbundle)
call ESMF_GridCompInitialize(fcstComp, exportState=fcstState, &
- clock=clock_fv3, userRc=urc, rc=rc)
+ clock=clock, userRc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
!
-! reconcile the fcstComp's import state
+! reconcile the fcstComp's export state
call ESMF_StateReconcile(fcstState, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
@@ -483,7 +399,9 @@ subroutine InitializeAdvertise(gcomp, rc)
! pull out the item names and item types from fcstState
call ESMF_StateGet(fcstState, itemNameList=fcstItemNameList, &
- itemTypeList=fcstItemTypeList, rc=rc)
+ itemTypeList=fcstItemTypeList, &
+ !itemorderflag=ESMF_ITEMORDER_ADDORDER, &
+ rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
! loop over all items in the fcstState and collect all FieldBundles
@@ -502,9 +420,6 @@ subroutine InitializeAdvertise(gcomp, rc)
return
endif
enddo
-!
-! set up ESMF time interval at center of iau window
- call ESMF_TimeIntervalSet(IAU_offsetTI, h=iau_offset, rc=rc)
!
k = num_pes_fcst
timerhs = MPI_Wtime()
@@ -526,6 +441,17 @@ subroutine InitializeAdvertise(gcomp, rc)
! print *,'af wrtComp(i)=',i,'name=',trim(cwrtcomp),'rc=',rc
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+! copy attributes from fv3cap component to wrtComp
+ call ESMF_InfoGetFromHost(wrtComp(i), info=childInfo, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_InfoUpdate(lhs=childInfo, rhs=parentInfo, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+! use the generic SetVM method to do resource and threading control
+ call ESMF_GridCompSetVM(wrtComp(i), SetVM, userRc=urc, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
+
! call into wrtComp(i) SetServices
call ESMF_GridCompSetServices(wrtComp(i), wrtSS, userRc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -550,7 +476,7 @@ subroutine InitializeAdvertise(gcomp, rc)
! call into wrtComp(i) Initialize
call ESMF_GridCompInitialize(wrtComp(i), importState=wrtstate(i), &
- clock=clock_fv3, phase=1, userRc=urc, rc=rc)
+ clock=clock, phase=1, userRc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
@@ -603,10 +529,8 @@ subroutine InitializeAdvertise(gcomp, rc)
if (i==1) then
! this is a Store() for the first wrtComp -> must do the Store()
- timewri = MPI_Wtime()
-
- call ESMF_FieldBundleRegridStore(fcstFB(j), wrtFB(j,i), &
- regridMethod=regridmethod, routehandle=routehandle(j,i), &
+ call ESMF_FieldBundleRegridStore(fcstFB(j), wrtFB(j,1), &
+ regridMethod=regridmethod, routehandle=routehandle(j,1), &
unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, &
srcTermProcessing=isrcTermProcessing, rc=rc)
@@ -633,10 +557,10 @@ subroutine InitializeAdvertise(gcomp, rc)
endif
write(msgString,"(A,I2.2,',',I2.2,A)") "... returned from wrtFB(",j,i, ") FieldBundleRegridStore()."
call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO, rc=rc)
- enddo
+ enddo ! j=1, FBcount
! end write_groups
- enddo
+ enddo ! i=1, write_groups
if(mype==0) print *,'in fv3cap init, time wrtcrt/regrdst',MPI_Wtime()-timerhs
deallocate(petList)
deallocate(originPetList)
@@ -660,7 +584,7 @@ subroutine InitializeAdvertise(gcomp, rc)
!--- use nsout for output frequency nsout*dt_atmos
nfh = 0
if( nfhmax > output_startfh ) nfh = nint((nfhmax-output_startfh)/(nsout*dt_atmos/3600.))+1
- if(nfh >0) then
+ if(nfh >0) then
allocate(output_fh(nfh))
if( output_startfh == 0) then
output_fh(1) = dt_atmos/3600.
@@ -746,7 +670,7 @@ subroutine InitializeAdvertise(gcomp, rc)
endif
do i=2,nfh
output_fh(i) = (i-1)*outputfh2(1) + output_startfh
- ! Except fh000, which is the first time output, if any other of the
+ ! Except fh000, which is the first time output, if any other of the
! output time is not integer hour, set lflname_fulltime to be true, so the
! history file names will contain the full time stamp (HHH-MM-SS).
if(.not.lflname_fulltime) then
@@ -764,7 +688,7 @@ subroutine InitializeAdvertise(gcomp, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
if( output_startfh == 0) then
! If the output time in output_fh array contains first time stamp output,
- ! check the rest of output time, otherwise, check all the output time.
+ ! check the rest of output time, otherwise, check all the output time.
! If any of them is not integer hour, the history file names will
! contain the full time stamp (HHH-MM-SS)
ist = 1
@@ -790,7 +714,7 @@ subroutine InitializeAdvertise(gcomp, rc)
endif
endif
endif ! end loutput_fh
- endif
+ endif
if(mype==0) print *,'output_fh=',output_fh(1:size(output_fh)),'lflname_fulltime=',lflname_fulltime
!
! --- advertise Fields in importState and exportState -------------------
@@ -824,10 +748,9 @@ subroutine InitializeRealize(gcomp, rc)
integer, intent(out) :: rc
! local variables
- character(len=*),parameter :: subname='(fv3gfs_cap:InitializeRealize)'
- type(ESMF_State) :: importState, exportState
- logical :: isPetLocal
- integer :: n
+ character(len=*),parameter :: subname='(fv3gfs_cap:InitializeRealize)'
+ type(ESMF_State) :: importState, exportState
+ logical :: isPetLocal
rc = ESMF_SUCCESS
@@ -843,7 +766,7 @@ subroutine InitializeRealize(gcomp, rc)
if (isPetLocal) then
! -- realize connected fields in exportState
- call realizeConnectedCplFields(exportState, fcstGrid, &
+ call realizeConnectedCplFields(exportState, fcstGrid(mygrid), &
numLevels, numSoilLayers, numTracers, &
exportFieldsInfo, 'FV3 Export', exportFields, 0.0_ESMF_KIND_R8, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -853,7 +776,7 @@ subroutine InitializeRealize(gcomp, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
! -- realize connected fields in importState
- call realizeConnectedCplFields(importState, fcstGrid, &
+ call realizeConnectedCplFields(importState, fcstGrid(mygrid), &
numLevels, numSoilLayers, numTracers, &
importFieldsInfo, 'FV3 Import', importFields, 9.99e20_ESMF_KIND_R8, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -869,88 +792,18 @@ subroutine ModelAdvance(gcomp, rc)
type(ESMF_GridComp) :: gcomp
integer, intent(out) :: rc
- ! local variables
- type(ESMF_Clock) :: clock
- type(ESMF_Time) :: currTime, startTime, stopTime
- type(ESMF_TimeInterval) :: timeStep
-
- integer :: i, urc
- character(len=*),parameter :: subname='(fv3_cap:ModelAdvance)'
- character(240) :: msgString
- character(240) :: startTime_str, currTime_str, stopTime_str, timeStep_str
-
!-----------------------------------------------------------------------------
rc = ESMF_SUCCESS
if (profile_memory) call ESMF_VMLogMemInfo("Entering FV3 ModelAdvance: ")
- ! Because of the way that the internal Clock was set in SetClock(),
- ! its timeStep is likely smaller than the parent timeStep. As a consequence
- ! the time interval covered by a single parent timeStep will result in
- ! multiple calls to the ModelAdvance() routine. Every time the currTime
- ! will come in by one internal timeStep advanced. This goes until the
- ! stopTime of the internal Clock has been reached.
-
- call ESMF_ClockPrint(clock_fv3, options="currTime", &
- preString="------>Advancing FV3 from: ", unit=msgString, rc=rc)
+ call ModelAdvance_phase1(gcomp, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO, rc=rc)
+ call ModelAdvance_phase2(gcomp, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!-----------------------------------------------------------------------
-!*** Use the internal Clock set by NUOPC layer for FV3 but update stopTime
-!-----------------------------------------------------------------------
-
- ! Component internal Clock gets updated per NUOPC rules
- call ESMF_GridCompGet(gcomp, clock=clock, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! The stopTime will be updated to be the next coupling time
- call ESMF_ClockGet(clock, currTime=currTime, stopTime=stopTime, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! Set the coupling time to be stopTime in Clock that FV3 core uses
- call ESMF_ClockSet(clock_fv3, currTime=currTime, stopTime=stopTime, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_ClockPrint(clock_fv3, options="currTime", &
- preString="entering FV3_ADVANCE with clock_fv3 current: ", &
- unit=msgString)
- call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
- call ESMF_ClockPrint(clock_fv3, options="startTime", &
- preString="entering FV3_ADVANCE with clock_fv3 start: ", &
- unit=msgString)
- call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
- call ESMF_ClockPrint(clock_fv3, options="stopTime", &
- preString="entering FV3_ADVANCE with clock_fv3 stop: ", &
- unit=msgString)
- call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
-
- ! call ESMF_ClockGet(clock_fv3, startTime=startTime, currTime=currTime, &
- ! timeStep=timeStep, stopTime=stopTime, rc=rc)
- ! if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! call ESMF_TimeGet(startTime, timestring=startTime_str, rc=rc)
- ! call ESMF_TimeGet(currTime, timestring=currTime_str, rc=rc)
- ! call ESMF_TimeGet(stopTime, timestring=stopTime_str, rc=rc)
- ! call ESMF_TimeIntervalGet(timeStep, timestring=timeStep_str, rc=rc)
-
-!
-!-----------------------------------------------------------------------------
-!*** integration loop
-
- integrate: do while(.NOT.ESMF_ClockIsStopTime(clock_fv3, rc=rc))
-
- call ModelAdvance_phase1(gcomp, rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ModelAdvance_phase2(gcomp, rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- enddo integrate
-!
if (profile_memory) call ESMF_VMLogMemInfo("Leaving FV3 ModelAdvance: ")
end subroutine ModelAdvance
@@ -962,105 +815,42 @@ subroutine ModelAdvance_phase1(gcomp, rc)
integer, intent(out) :: rc
! local variables
- type(ESMF_State) :: importState, exportState
type(ESMF_Clock) :: clock
- type(ESMF_Time) :: currTime
- type(ESMF_TimeInterval) :: timeStep
- type(ESMF_Time) :: startTime, stopTime
-
integer :: urc
logical :: fcstpe
character(len=*),parameter :: subname='(fv3_cap:ModelAdvance_phase1)'
character(240) :: msgString
- integer :: date(6)
-
!-----------------------------------------------------------------------------
rc = ESMF_SUCCESS
if(profile_memory) call ESMF_VMLogMemInfo("Entering FV3 ModelAdvance_phase1: ")
- fcstpe = .false.
- if( mype < num_pes_fcst ) fcstpe = .true.
-
- ! Expecting to be called by NUOPC run method exactly once for every coupling
- ! step.
- ! Also expecting the coupling step to be identical to the timeStep for
- ! clock_fv3.
-
- call ESMF_ClockPrint(clock_fv3, options="currTime", &
- preString="------>Advancing FV3 phase1 from: ", unit=msgString, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
-!-----------------------------------------------------------------------
-!*** Use the internal Clock set by NUOPC layer for FV3 but update stopTime
-!-----------------------------------------------------------------------
-
- ! Component internal Clock gets updated per NUOPC rules
call ESMF_GridCompGet(gcomp, clock=clock, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- ! The stopTime will be updated to be the next external coupling time
- call ESMF_ClockGet(clock, currTime=currTime, stopTime=stopTime, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! Set the FV3-OCN coupling time to be stopTime in Clock that FV3 core uses
- !call ESMF_ClockSet(clock_fv3, currTime=currTime, stopTime=stopTime, rc=rc)
- call ESMF_ClockSet(clock_fv3, stopTime=stopTime, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_ClockPrint(clock_fv3, options="currTime", &
- preString="entering FV3_ADVANCE phase1 with clock_fv3 current: ", &
+ call ESMF_ClockPrint(clock, options="currTime", &
+ preString="entering FV3_ADVANCE phase1 with clock current: ", &
unit=msgString)
call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
- call ESMF_ClockPrint(clock_fv3, options="startTime", &
- preString="entering FV3_ADVANCE phase1 with clock_fv3 start: ", &
+ call ESMF_ClockPrint(clock, options="startTime", &
+ preString="entering FV3_ADVANCE phase1 with clock start: ", &
unit=msgString)
call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
- call ESMF_ClockPrint(clock_fv3, options="stopTime", &
- preString="entering FV3_ADVANCE phase1 with clock_fv3 stop: ", &
+ call ESMF_ClockPrint(clock, options="stopTime", &
+ preString="entering FV3_ADVANCE phase1 with clock stop: ", &
unit=msgString)
call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
- call ESMF_ClockGet(clock_fv3, startTime=startTime, currTime=currTime, &
- timeStep=timeStep, stopTime=stopTime, rc=rc)
+ call ESMF_GridCompRun(fcstComp, exportState=fcstState, clock=clock, phase=1, userRc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
-! if(mype==0) print *,'total steps=', nint((stopTime-startTime)/timeStep)
-! if(mype==lead_wrttask(1)) print *,'on wrt lead,total steps=', nint((stopTime-startTime)/timeStep)
- call ESMF_TimeGet(time=stopTime,yy=date(1),mm=date(2),dd=date(3),h=date(4), &
- m=date(5),s=date(6),rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-! if(mype==0) print *,'af clock,stop date=',date
-! if(mype==lead_wrttask(1)) print *,'on wrt lead,af clock,stop date=',date
- call ESMF_TimeIntervalGet(timeStep,yy=date(1),mm=date(2),d=date(3),h=date(4), &
- m=date(5),s=date(6),rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-! if(mype==0) print *,'af clock,timestep date=',date
-! if(mype==lead_wrttask(1)) print *,'on wrt lead,af clock,timestep date=',date
-
- call ESMF_LogWrite('Model Advance phase1: before fcstcomp run ', ESMF_LOGMSG_INFO, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_GridCompRun(fcstComp, exportState=fcstState, clock=clock_fv3, &
- phase=1, userRc=urc, rc=rc)
- if (rc /= ESMF_SUCCESS) then
- if(mype==0) print *,'after fcstComp phase1 rc=',rc
- endif
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
- call ESMF_LogWrite('Model Advance phase1: after fcstcomp run ', ESMF_LOGMSG_INFO, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- ! assign import_data called during phase=1
if( dbug > 0 .or. cplprint_flag ) then
- call diagnose_cplFields(gcomp, clock_fv3, fcstpe, cplprint_flag, dbug, 'import')
+ fcstpe = .false.
+ if( mype < num_pes_fcst ) fcstpe = .true.
+ call diagnose_cplFields(gcomp, clock, fcstpe, cplprint_flag, dbug, 'import')
endif
if (profile_memory) call ESMF_VMLogMemInfo("Leaving FV3 ModelAdvance_phase1: ")
@@ -1074,73 +864,63 @@ subroutine ModelAdvance_phase2(gcomp, rc)
integer, intent(out) :: rc
! local variables
- type(ESMF_State) :: importState, exportState
- type(ESMF_Clock) :: clock
type(ESMF_Time) :: currTime
type(ESMF_TimeInterval) :: timeStep
- type(ESMF_Time) :: startTime, stopTime
+ type(ESMF_Time) :: startTime
type(ESMF_TimeInterval) :: time_elapsed
- integer :: na, i, urc
+ integer :: na, j, urc
integer :: nfseconds
logical :: fcstpe
character(len=*),parameter :: subname='(fv3_cap:ModelAdvance_phase2)'
character(240) :: msgString
- real(kind=8) :: MPI_Wtime
- real(kind=8) :: timewri, timerhi, timerh
+
+ type(ESMF_Clock) :: clock, clock_out
!-----------------------------------------------------------------------------
rc = ESMF_SUCCESS
+
if(profile_memory) call ESMF_VMLogMemInfo("Entering FV3 ModelAdvance_phase2: ")
- fcstpe = .false.
- if( mype < num_pes_fcst ) fcstpe = .true.
-!
- timewri = MPI_Wtime()
- call ESMF_LogWrite('Model Advance phase2: before fcstComp run phase2', ESMF_LOGMSG_INFO, rc=rc)
+ call ESMF_GridCompGet(gcomp, clock=clock, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridCompRun(fcstComp, exportState=fcstState, clock=clock_fv3, &
- phase=2, userRc=urc, rc=rc)
-
+ call ESMF_GridCompRun(fcstComp, exportState=fcstState, clock=clock, phase=2, userRc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
- call ESMF_LogWrite('Model Advance phase2: after fcstComp run phase2', ESMF_LOGMSG_INFO, rc=rc)
+ clock_out = ESMF_ClockCreate(clock, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_ClockAdvance(clock = clock_fv3, rc = RC)
+ call ESMF_ClockAdvance(clock_out, rc = RC)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_ClockGet(clock_fv3, startTime=startTime, currTime=currTime, &
- timeStep=timeStep, stopTime=stopTime, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- time_elapsed = currtime - starttime
- na = nint(time_elapsed/timeStep)
- call ESMF_TimeIntervalGet(time_elapsed, s=nfseconds, rc=rc)
-!
- if(mype==0) print *,'n fv3_cap,in model run, advance,na=',na
-
!-------------------------------------------------------------------------------
!*** if it is output time, call data transfer and write grid comp run
if( quilting ) then
+ call ESMF_ClockGet(clock_out, startTime=startTime, currTime=currTime, &
+ timeStep=timeStep, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ time_elapsed = currTime - startTime
+ na = nint(time_elapsed/timeStep)
+ call ESMF_TimeIntervalGet(time_elapsed, s=nfseconds, rc=rc)
+
output: if (ANY(nint(output_fh(:)*3600.0) == nfseconds)) then
!
if (mype == 0 .or. mype == lead_wrttask(1)) print *,' aft fcst run output time=',nfseconds, &
'FBcount=',FBcount,'na=',na
- timerhi = MPI_Wtime()
call ESMF_VMEpochEnter(epoch=ESMF_VMEpoch_Buffer, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- do i=1, FBCount
+ do j=1, FBCount
- call ESMF_FieldBundleRegrid(fcstFB(i), wrtFB(i,n_group), &
- routehandle=routehandle(i, n_group), &
+ call ESMF_FieldBundleRegrid(fcstFB(j), wrtFB(j,n_group), &
+ routehandle=routehandle(j, n_group), &
termorderflag=(/ESMF_TERMORDER_SRCSEQ/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
@@ -1149,15 +929,10 @@ subroutine ModelAdvance_phase2(gcomp, rc)
call ESMF_VMEpochExit(rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- ! if(mype==0 .or. mype==lead_wrttask(1)) print *,'on wrt bf wrt run, na=',na
call ESMF_LogWrite('Model Advance: before wrtcomp run ', ESMF_LOGMSG_INFO, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- timerhi = MPI_Wtime()
- call ESMF_GridCompRun(wrtComp(n_group), importState=wrtState(n_group), clock=clock_fv3,userRc=urc,rc=rc)
-
- timerh = MPI_Wtime()
-
+ call ESMF_GridCompRun(wrtComp(n_group), importState=wrtState(n_group), clock=clock_out, userRc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
@@ -1174,28 +949,63 @@ subroutine ModelAdvance_phase2(gcomp, rc)
endif ! quilting
-!jw check clock
- call ESMF_ClockPrint(clock_fv3, options="currTime", &
- preString="leaving FV3_ADVANCE phase2 with clock_fv3 current: ", &
+ call ESMF_ClockPrint(clock, options="currTime", &
+ preString="leaving FV3_ADVANCE phase2 with clock current: ", &
unit=msgString)
call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
- call ESMF_ClockPrint(clock_fv3, options="startTime", &
- preString="leaving FV3_ADVANCE phase2 with clock_fv3 start: ", &
+ call ESMF_ClockPrint(clock, options="startTime", &
+ preString="leaving FV3_ADVANCE phase2 with clock start: ", &
unit=msgString)
call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
- call ESMF_ClockPrint(clock_fv3, options="stopTime", &
- preString="leaving FV3_ADVANCE phase2 with clock_fv3 stop: ", &
+ call ESMF_ClockPrint(clock, options="stopTime", &
+ preString="leaving FV3_ADVANCE phase2 with clock stop: ", &
unit=msgString)
call ESMF_LogWrite(msgString, ESMF_LOGMSG_INFO)
if( dbug > 0 .or. cplprint_flag ) then
- call diagnose_cplFields(gcomp, clock_fv3, fcstpe, cplprint_flag, dbug, 'export')
+ fcstpe = .false.
+ if( mype < num_pes_fcst ) fcstpe = .true.
+ call diagnose_cplFields(gcomp, clock_out, fcstpe, cplprint_flag, dbug, 'export')
end if
if(profile_memory) call ESMF_VMLogMemInfo("Leaving FV3 ModelAdvance_phase2: ")
end subroutine ModelAdvance_phase2
+!-----------------------------------------------------------------------------
+
+ subroutine ModelSetRunClock(gcomp, rc)
+
+ type(ESMF_GridComp) :: gcomp
+ integer, intent(out) :: rc
+
+ ! local variables
+ type(ESMF_Clock) :: dclock, mclock
+ type(ESMF_TimeInterval) :: dtimestep, mtimestep
+ type(ESMF_Time) :: mcurrtime, mstoptime
+
+!-----------------------------------------------------------------------------
+
+ rc = ESMF_SUCCESS
+
+ call NUOPC_ModelGet(gcomp, driverClock=dclock, modelClock=mclock, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_ClockGet(dclock, timeStep=dtimestep, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_ClockGet(mclock, currTime=mcurrtime, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_TimeIntervalSet(mtimestep,s=dt_atmos,rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ mstoptime = mcurrtime + dtimestep
+
+ call ESMF_ClockSet(mclock, timeStep=mtimestep, stopTime=mstoptime, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ end subroutine ModelSetRunClock
+
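The ModelSetRunClock specialization above is attached through the standard NUOPC label; a sketch of the registration, assuming the usual renamed import from NUOPC_Model (the actual SetServices call is elsewhere in this cap and may differ):

      ! Hedged sketch of how a NUOPC cap typically registers this routine.
      use NUOPC_Model, only: model_label_SetRunClock => label_SetRunClock
      ...
      call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, &
                                specRoutine=ModelSetRunClock, rc=rc)
      if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return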
!-----------------------------------------------------------------------------
subroutine fv3_checkimport(gcomp, rc)
@@ -1218,6 +1028,8 @@ subroutine fv3_checkimport(gcomp, rc)
character(esmf_maxstr) :: msgString
integer :: date(6)
+ rc = ESMF_SUCCESS
+
! query the Component for its clock
call ESMF_GridCompGet(gcomp, clock=clock, importState=importState, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -1323,7 +1135,7 @@ subroutine ModelFinalize(gcomp, rc)
! local variables
character(len=*),parameter :: subname='(fv3gfs_cap:ModelFinalize)'
- integer :: i, unit, urc
+ integer :: i, urc
type(ESMF_VM) :: vm
real(kind=8) :: MPI_Wtime, timeffs
!
@@ -1334,6 +1146,7 @@ subroutine ModelFinalize(gcomp, rc)
rc = ESMF_SUCCESS
!
call ESMF_GridCompGet(gcomp,vm=vm,rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
!*** finalize grid comps
if( quilting ) then
diff --git a/io/FV3GFS_io.F90 b/io/FV3GFS_io.F90
index 3827ccb68..041a2d46b 100644
--- a/io/FV3GFS_io.F90
+++ b/io/FV3GFS_io.F90
@@ -196,7 +196,7 @@ subroutine FV3GFS_GFS_checksum (Model, GFS_Data, Atm_block)
nsfcprop2d = nsfcprop2d + 16
endif
- allocate (temp2d(isc:iec,jsc:jec,nsfcprop2d+Model%ntot3d+Model%nctp))
+ allocate (temp2d(isc:iec,jsc:jec,nsfcprop2d+Model%ntot2d+Model%nctp))
allocate (temp3d(isc:iec,jsc:jec,1:lev,14+Model%ntot3d+2*ntr))
allocate (temp3dlevsp1(isc:iec,jsc:jec,1:lev+1,3))
@@ -1462,7 +1462,7 @@ subroutine sfc_prop_restart_read (Sfcprop, Atm_block, Model, fv_domain, warm_sta
do nb = 1, Atm_block%nblks
do ix = 1, Atm_block%blksz(nb)
if (Sfcprop(nb)%landfrac(ix) > zero) then
- tem = one / Sfcprop(nb)%landfrac(ix)
+ tem = one / (Sfcprop(nb)%fice(ix)*(one-Sfcprop(nb)%landfrac(ix))+Sfcprop(nb)%landfrac(ix))
Sfcprop(nb)%snodl(ix) = Sfcprop(nb)%snowd(ix) * tem
else
Sfcprop(nb)%snodl(ix) = zero
@@ -1477,7 +1477,7 @@ subroutine sfc_prop_restart_read (Sfcprop, Atm_block, Model, fv_domain, warm_sta
do nb = 1, Atm_block%nblks
do ix = 1, Atm_block%blksz(nb)
if (Sfcprop(nb)%landfrac(ix) > zero) then
- tem = one / Sfcprop(nb)%landfrac(ix)
+ tem = one / (Sfcprop(nb)%fice(ix)*(one-Sfcprop(nb)%landfrac(ix))+Sfcprop(nb)%landfrac(ix))
Sfcprop(nb)%weasdl(ix) = Sfcprop(nb)%weasd(ix) * tem
else
Sfcprop(nb)%weasdl(ix) = zero
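A worked example of the new weighting (illustrative numbers only): for a point with landfrac = 0.4 and fice = 0.5, the denominator becomes 0.5*(1.0-0.4) + 0.4 = 0.7, so snodl = snowd/0.7 and weasdl = weasd/0.7, whereas the removed lines divided by landfrac alone, i.e. by 0.4.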
@@ -1501,7 +1501,9 @@ subroutine sfc_prop_restart_read (Sfcprop, Atm_block, Model, fv_domain, warm_sta
!$omp parallel do default(shared) private(nb, ix)
do nb = 1, Atm_block%nblks
do ix = 1, Atm_block%blksz(nb)
- Sfcprop(nb)%zorlw(ix) = Sfcprop(nb)%zorl(ix) !--- compute zorlw from existing variables
+ if (Sfcprop(nb)%landfrac(ix) < one .and. Sfcprop(nb)%fice(ix) < one) then
+ Sfcprop(nb)%zorlw(ix) = min(Sfcprop(nb)%zorl(ix), 0.317)
+ endif
enddo
enddo
endif
@@ -1521,7 +1523,9 @@ subroutine sfc_prop_restart_read (Sfcprop, Atm_block, Model, fv_domain, warm_sta
!$omp parallel do default(shared) private(nb, ix)
do nb = 1, Atm_block%nblks
do ix = 1, Atm_block%blksz(nb)
- Sfcprop(nb)%zorli(ix) = Sfcprop(nb)%zorl(ix) !--- compute zorli from existing variables
+ if (Sfcprop(nb)%fice(ix)*(one-Sfcprop(nb)%landfrac(ix)) > zero) then
+ Sfcprop(nb)%zorli(ix) = one
+ endif
enddo
enddo
endif
@@ -1547,6 +1551,36 @@ subroutine sfc_prop_restart_read (Sfcprop, Atm_block, Model, fv_domain, warm_sta
enddo
endif
+ if (sfc_var2(i,j,47) < -9990.0_r8) then
+ if (Model%me == Model%master ) call mpp_error(NOTE, 'gfs_driver::surface_props_input - computing snodi')
+!$omp parallel do default(shared) private(nb, ix, tem)
+ do nb = 1, Atm_block%nblks
+ do ix = 1, Atm_block%blksz(nb)
+ if (Sfcprop(nb)%fice(ix) > zero) then
+ tem = one / (Sfcprop(nb)%fice(ix)*(one-Sfcprop(nb)%landfrac(ix))+Sfcprop(nb)%landfrac(ix))
+ Sfcprop(nb)%snodi(ix) = min(Sfcprop(nb)%snowd(ix) * tem, 3.0)
+ else
+ Sfcprop(nb)%snodi(ix) = zero
+ endif
+ enddo
+ enddo
+ endif
+
+ if (sfc_var2(i,j,48) < -9990.0_r8) then
+ if (Model%me == Model%master ) call mpp_error(NOTE, 'gfs_driver::surface_props_input - computing weasdi')
+!$omp parallel do default(shared) private(nb, ix, tem)
+ do nb = 1, Atm_block%nblks
+ do ix = 1, Atm_block%blksz(nb)
+ if (Sfcprop(nb)%fice(ix) > zero) then
+ tem = one / (Sfcprop(nb)%fice(ix)*(one-Sfcprop(nb)%landfrac(ix))+Sfcprop(nb)%landfrac(ix))
+ Sfcprop(nb)%weasdi(ix) = Sfcprop(nb)%weasd(ix)*tem
+ else
+ Sfcprop(nb)%weasdi(ix) = zero
+ endif
+ enddo
+ enddo
+ endif
+
if (Model%use_cice_alb) then
if (sfc_var2(i,j,49) < -9990.0_r8) then
!$omp parallel do default(shared) private(nb, ix)
@@ -3047,7 +3081,7 @@ end subroutine store_data3D
!
#ifdef use_WRTCOMP
- subroutine fv_phys_bundle_setup(Diag, axes, phys_bundle, fcst_grid, quilting, nbdlphys)
+ subroutine fv_phys_bundle_setup(Diag, axes, phys_bundle, fcst_grid, quilting, nbdlphys, rc)
!
!-------------------------------------------------------------
!*** set esmf bundle for phys output fields
@@ -3058,15 +3092,17 @@ subroutine fv_phys_bundle_setup(Diag, axes, phys_bundle, fcst_grid, quilting, nb
!
implicit none
!
- type(GFS_externaldiag_type),intent(in) :: Diag(:)
+ type(GFS_externaldiag_type),intent(in) :: Diag(:)
integer, intent(in) :: axes(:)
type(ESMF_FieldBundle),intent(inout) :: phys_bundle(:)
type(ESMF_Grid),intent(inout) :: fcst_grid
logical,intent(in) :: quilting
integer, intent(in) :: nbdlphys
+ integer,intent(out) :: rc
+
!
!*** local variables
- integer i, j, k, n, rc, idx, ibdl, nbdl
+ integer i, j, k, n, idx, ibdl, nbdl
integer id, axis_length, direction, edges, axis_typ
integer num_attributes, num_field_dyn
integer currdate(6)
@@ -3099,7 +3135,7 @@ subroutine fv_phys_bundle_setup(Diag, axes, phys_bundle, fcst_grid, quilting, nb
!------------------------------------------------------------
!
allocate(bdl_intplmethod(nbdlphys), outputfile(nbdlphys))
- if(mpp_pe()==mpp_root_pe())print *,'in fv_phys bundle,nbdl=',nbdlphys
+ if(mpp_pe()==mpp_root_pe()) print *,'in fv_phys bundle,nbdl=',nbdlphys
do ibdl = 1, nbdlphys
loutputfile = .false.
call ESMF_FieldBundleGet(phys_bundle(ibdl), name=physbdl_name,rc=rc)
@@ -3178,14 +3214,14 @@ subroutine fv_phys_bundle_setup(Diag, axes, phys_bundle, fcst_grid, quilting, nb
allocate(udimList(udimCount))
call ESMF_AttributeGet(fcst_grid, convention="NetCDF", purpose="FV3", &
name="vertical_dim_labels", valueList=udimList, rc=rc)
-! if(mpp_pe()==mpp_root_pe())print *,'in fv3gfsio, vertical
+! if(mpp_pe()==mpp_root_pe()) print *,'in fv3gfsio, vertical
! list=',udimList(1:udimCount),'rc=',rc
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
else
- if(mpp_pe()==mpp_root_pe())print *,'in fv_dyn bundle,axis_name_vert=',axis_name_vert
+      if(mpp_pe()==mpp_root_pe()) print *,'in fv_phys bundle,axis_name_vert=',axis_name_vert
call ESMF_AttributeAdd(fcst_grid, convention="NetCDF", purpose="FV3", &
attrList=(/"vertical_dim_labels"/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -3193,6 +3229,7 @@ subroutine fv_phys_bundle_setup(Diag, axes, phys_bundle, fcst_grid, quilting, nb
name="vertical_dim_labels", valueList=axis_name_vert, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
endif
+ deallocate(axis_name_vert)
endif
!*** add attributes
@@ -3207,13 +3244,13 @@ subroutine fv_phys_bundle_setup(Diag, axes, phys_bundle, fcst_grid, quilting, nb
direction, edges, Domain, DomainU, axis_data, &
num_attributes=num_attributes, attributes=attributes)
!
- edgesS=''
+ edgesS = ''
do i = 1,num_axes_phys
if(axes(i) == edges) edgesS=axis_name(i)
enddo
! Add vertical dimension Attributes to Grid
if( id>2 ) then
-! if(mpp_pe()==mpp_root_pe())print *,' in dyn add grid, axis_name=', &
+! if(mpp_pe()==mpp_root_pe()) print *,' in dyn add grid, axis_name=', &
! trim(axis_name(id)),'axis_data=',axis_data
if(trim(edgesS)/='') then
call ESMF_AttributeAdd(fcst_grid, convention="NetCDF", purpose="FV3", &
@@ -3307,6 +3344,8 @@ subroutine fv_phys_bundle_setup(Diag, axes, phys_bundle, fcst_grid, quilting, nb
endif
enddo
+ deallocate(axis_name)
+ deallocate(all_axes)
end subroutine fv_phys_bundle_setup
!
@@ -3415,62 +3454,62 @@ subroutine add_field_to_phybundle(var_name,long_name,units,cell_methods, axes,ph
!
!*** add field attributes
call ESMF_AttributeAdd(field, convention="NetCDF", purpose="FV3", &
- attrList=(/"long_name"/), rc=rc)
+ attrList=(/"long_name"/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeSet(field, convention="NetCDF", purpose="FV3", &
- name='long_name',value=trim(long_name),rc=rc)
+ name='long_name',value=trim(long_name),rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeAdd(field, convention="NetCDF", purpose="FV3", &
- attrList=(/"units"/), rc=rc)
+ attrList=(/"units"/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeSet(field, convention="NetCDF", purpose="FV3", &
- name='units',value=trim(units),rc=rc)
+ name='units',value=trim(units),rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeAdd(field, convention="NetCDF", purpose="FV3", &
- attrList=(/"missing_value"/), rc=rc)
+ attrList=(/"missing_value"/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeSet(field, convention="NetCDF", purpose="FV3", &
- name='missing_value',value=missing_value,rc=rc)
+ name='missing_value',value=missing_value,rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeAdd(field, convention="NetCDF", purpose="FV3", &
- attrList=(/"_FillValue"/), rc=rc)
+ attrList=(/"_FillValue"/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeSet(field, convention="NetCDF", purpose="FV3", &
- name='_FillValue',value=missing_value,rc=rc)
+ name='_FillValue',value=missing_value,rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeAdd(field, convention="NetCDF", purpose="FV3", &
- attrList=(/"cell_methods"/), rc=rc)
+ attrList=(/"cell_methods"/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeSet(field, convention="NetCDF", purpose="FV3", &
- name='cell_methods',value=trim(cell_methods),rc=rc)
+ name='cell_methods',value=trim(cell_methods),rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
!
call ESMF_AttributeAdd(field, convention="NetCDF", purpose="FV3", &
- attrList=(/"output_file"/), rc=rc)
+ attrList=(/"output_file"/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
call ESMF_AttributeSet(field, convention="NetCDF", purpose="FV3", &
- name='output_file',value=trim(output_file),rc=rc)
+ name='output_file',value=trim(output_file),rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
diff --git a/io/inline_post.F90 b/io/inline_post.F90
index b51e2e7ac..2e123346e 100644
--- a/io/inline_post.F90
+++ b/io/inline_post.F90
@@ -16,7 +16,7 @@ module inline_post
contains
- subroutine inline_post_run(wrt_int_state,mypei,mpicomp,lead_write, &
+ subroutine inline_post_run(wrt_int_state,grid_id,mypei,mpicomp,lead_write, &
mynfhr,mynfmin,mynfsec)
!
! revision history:
@@ -30,6 +30,7 @@ subroutine inline_post_run(wrt_int_state,mypei,mpicomp,lead_write, &
!-----------------------------------------------------------------------
!
type(wrt_internal_state),intent(in) :: wrt_int_state
+ integer,intent(in) :: grid_id
integer,intent(in) :: mypei
integer,intent(in) :: mpicomp
integer,intent(in) :: lead_write
@@ -37,14 +38,14 @@ subroutine inline_post_run(wrt_int_state,mypei,mpicomp,lead_write, &
integer,intent(in) :: mynfmin
integer,intent(in) :: mynfsec
!
- if(mypei == 0) print *,'inline_post_run, output_grid=',trim(output_grid)
- if(trim(output_grid) == 'gaussian_grid' &
- .or. trim(output_grid) == 'global_latlon') then
+ if(mypei == 0) print *,'inline_post_run, output_grid=',trim(output_grid(grid_id))
+ if(trim(output_grid(grid_id)) == 'gaussian_grid' &
+ .or. trim(output_grid(grid_id)) == 'global_latlon') then
call post_run_gfs(wrt_int_state, mypei, mpicomp, lead_write, &
mynfhr, mynfmin,mynfsec)
- else if( trim(output_grid) == 'regional_latlon' &
- .or. trim(output_grid) == 'rotated_latlon' &
- .or. trim(output_grid) == 'lambert_conformal') then
+ else if( trim(output_grid(grid_id)) == 'regional_latlon' &
+ .or. trim(output_grid(grid_id)) == 'rotated_latlon' &
+ .or. trim(output_grid(grid_id)) == 'lambert_conformal') then
if(mypei == 0) print *,'inline_post_run, call post_run_regional'
call post_run_regional(wrt_int_state, mypei, mpicomp, lead_write, &
mynfhr, mynfmin,mynfsec)
@@ -55,21 +56,22 @@ end subroutine inline_post_run
!
!-----------------------------------------------------------------------
!
- subroutine inline_post_getattr(wrt_int_state)
+ subroutine inline_post_getattr(wrt_int_state,grid_id)
!
use esmf
!
implicit none
!
type(wrt_internal_state),intent(inout) :: wrt_int_state
+ integer, intent(in) :: grid_id
!
- if(trim(output_grid) == 'gaussian_grid' &
- .or. trim(output_grid) == 'global_latlon') then
+ if(trim(output_grid(grid_id)) == 'gaussian_grid' &
+ .or. trim(output_grid(grid_id)) == 'global_latlon') then
call post_getattr_gfs(wrt_int_state)
- else if( trim(output_grid) == 'regional_latlon' &
- .or. trim(output_grid) == 'rotated_latlon' &
- .or. trim(output_grid) == 'lambert_conformal') then
- call post_getattr_regional(wrt_int_state)
+ else if( trim(output_grid(grid_id)) == 'regional_latlon' &
+ .or. trim(output_grid(grid_id)) == 'rotated_latlon' &
+ .or. trim(output_grid(grid_id)) == 'lambert_conformal') then
+ call post_getattr_regional(wrt_int_state,grid_id)
endif
!
end subroutine inline_post_getattr
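A sketch of how the new grid_id argument threads through a caller, matching the signatures above; the call-site context (who supplies grid_id and the forecast-time arguments) is assumed and not shown in this diff, and the actual-argument names are illustrative:

      ! Hedged sketch of a write-component call site using the per-grid API.
      call inline_post_getattr(wrt_int_state, grid_id)
      ...
      call inline_post_run(wrt_int_state, grid_id, mype, mpi_comm, lead_write_task, &
                           nfhour, nfminute, nfsecond)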
diff --git a/io/module_fv3_io_def.F90 b/io/module_fv3_io_def.F90
index 65d2b926b..dda5310ad 100644
--- a/io/module_fv3_io_def.F90
+++ b/io/module_fv3_io_def.F90
@@ -1,4 +1,4 @@
- module module_fv3_io_def
+module module_fv3_io_def
!
!*** fv3 io related configration variables
!
@@ -9,24 +9,26 @@ module module_fv3_io_def
!
use esmf, only : esmf_maxstr
implicit none
-!
+
integer :: num_pes_fcst
integer :: wrttasks_per_group, write_groups
integer :: n_group
integer :: num_files
- character(len=esmf_maxstr) :: app_domain
- character(len=esmf_maxstr) :: output_grid
- integer :: imo,jmo
- integer :: ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d
integer :: nbdlphys
- integer :: nsout_io, iau_offset, ideflate, nbits
+ integer :: nsout_io, iau_offset
logical :: lflname_fulltime
- real :: cen_lon, cen_lat, lon1, lat1, lon2, lat2, dlon, dlat
- real :: stdlat1, stdlat2, dx, dy
+
character(len=esmf_maxstr),dimension(:),allocatable :: filename_base
character(len=esmf_maxstr),dimension(:),allocatable :: output_file
-!
+
integer,dimension(:),allocatable :: lead_wrttask, last_wrttask
-!
- end module module_fv3_io_def
+ character(len=esmf_maxstr),dimension(:),allocatable :: output_grid
+ integer,dimension(:),allocatable :: imo,jmo
+ real,dimension(:),allocatable :: cen_lon, cen_lat
+ real,dimension(:),allocatable :: lon1, lat1, lon2, lat2, dlon, dlat
+ real,dimension(:),allocatable :: stdlat1, stdlat2, dx, dy
+ integer,dimension(:),allocatable :: ideflate, nbits
+ integer,dimension(:),allocatable :: ichunk2d, jchunk2d, ichunk3d, jchunk3d, kchunk3d
+
+end module module_fv3_io_def
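With the io_def scalars promoted to per-grid allocatable arrays, callers now index them by the write component's grid id. A minimal sketch of the intended use, assuming ngrids output grids; the allocation and defaults shown are illustrative, not the actual configuration code:

      ! Hedged sketch: one entry per output grid, indexed by grid_id.
      allocate(output_grid(ngrids), imo(ngrids), jmo(ngrids))
      allocate(ideflate(ngrids), nbits(ngrids))
      output_grid(:) = 'gaussian_grid'
      ideflate(:)    = 0
      nbits(:)       = 0
      ...
      if (trim(output_grid(grid_id)) == 'lambert_conformal') then
        ! projection parameters such as stdlat1(grid_id), dx(grid_id) apply to this grid only
      end if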
diff --git a/io/module_write_internal_state.F90 b/io/module_write_internal_state.F90
index e396063c9..9c9ebbe26 100644
--- a/io/module_write_internal_state.F90
+++ b/io/module_write_internal_state.F90
@@ -49,8 +49,8 @@ module write_internal_state
integer :: lat_start, lon_start
integer :: lat_end, lon_end
real :: latstart, latlast, lonstart, lonlast
- integer,dimension(:),allocatable :: lat_start_wrtgrp
- integer,dimension(:),allocatable :: lat_end_wrtgrp
+ integer,dimension(:),allocatable :: lat_start_wrtgrp, lon_start_wrtgrp
+ integer,dimension(:),allocatable :: lat_end_wrtgrp, lon_end_wrtgrp
real,dimension(:,:),allocatable :: lonPtr, latPtr
!
!--------------------------
diff --git a/io/module_write_netcdf.F90 b/io/module_write_netcdf.F90
index 30959e625..1445d5e04 100644
--- a/io/module_write_netcdf.F90
+++ b/io/module_write_netcdf.F90
@@ -1,7 +1,8 @@
-#define ESMF_ERR_RETURN(rc) if (ESMF_LogFoundError(rc, msg="Breaking out of subroutine", line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
+#define ESMF_ERR_RETURN(rc) \
+ if (ESMF_LogFoundError(rc, msg="Breaking out of subroutine", line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
#define NC_ERR_STOP(status) \
- if (status /= nf90_noerr) write(0,*) "line ", __LINE__, trim(nf90_strerror(status)); \
+ if (status /= nf90_noerr) write(0,*) "file: ", __FILE__, " line: ", __LINE__, trim(nf90_strerror(status)); \
if (status /= nf90_noerr) call ESMF_Finalize(endflag=ESMF_END_ABORT)
module module_write_netcdf
@@ -9,84 +10,159 @@ module module_write_netcdf
use esmf
use netcdf
use module_fv3_io_def,only : ideflate, nbits, &
+ ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d, &
output_grid,dx,dy,lon1,lat1,lon2,lat2
+ use mpi
implicit none
private
public write_netcdf
+ logical :: par
+
+ interface quantize_array
+ module procedure quantize_array_3d
+ module procedure quantize_array_4d
+ end interface
+
contains
!----------------------------------------------------------------------------------------
- subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d, rc)
+ subroutine write_netcdf(wrtfb, filename, &
+ use_parallel_netcdf, mpi_comm, mype, &
+ grid_id, rc)
!
- type(ESMF_FieldBundle), intent(in) :: fieldbundle
type(ESMF_FieldBundle), intent(in) :: wrtfb
character(*), intent(in) :: filename
+ logical, intent(in) :: use_parallel_netcdf
integer, intent(in) :: mpi_comm
integer, intent(in) :: mype
- integer, intent(in) :: im, jm
- integer, intent(in) :: ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d
+ integer, intent(in) :: grid_id
integer, optional,intent(out) :: rc
!
!** local vars
- integer :: i,j,m,n,k
- integer :: lm
+ integer :: i,j,t, istart,iend,jstart,jend
+ integer :: im, jm, lm
+
+ integer, dimension(:), allocatable :: fldlev
+
+ real(ESMF_KIND_R4), dimension(:,:), pointer :: array_r4
+ real(ESMF_KIND_R4), dimension(:,:,:), pointer :: array_r4_cube
+ real(ESMF_KIND_R4), dimension(:,:,:), pointer :: array_r4_3d
+ real(ESMF_KIND_R4), dimension(:,:,:,:), pointer :: array_r4_3d_cube
- integer, dimension(:), allocatable :: fldlev
- real(4), dimension(:,:), allocatable :: arrayr4
- real(8), dimension(:,:), allocatable :: arrayr8
- real(4), dimension(:,:,:), allocatable :: arrayr4_3d,arrayr4_3d_save
- real(8), dimension(:,:,:), allocatable :: arrayr8_3d
+ real(ESMF_KIND_R8), dimension(:,:), pointer :: array_r8
+ real(ESMF_KIND_R8), dimension(:,:,:), pointer :: array_r8_cube
+ real(ESMF_KIND_R8), dimension(:,:,:), pointer :: array_r8_3d
+ real(ESMF_KIND_R8), dimension(:,:,:,:), pointer :: array_r8_3d_cube
- real(8) x(im),y(jm)
+ real(8), dimension(:), allocatable :: x,y
integer :: fieldCount, fieldDimCount, gridDimCount
integer, dimension(:), allocatable :: ungriddedLBound, ungriddedUBound
+ integer, dimension(:), allocatable :: start_idx
type(ESMF_Field), allocatable :: fcstField(:)
type(ESMF_TypeKind_Flag) :: typekind
type(ESMF_TypeKind_Flag) :: attTypeKind
type(ESMF_Grid) :: wrtgrid
type(ESMF_Array) :: array
+ type(ESMF_DistGrid) :: distgrid
- integer :: attcount
+ integer :: attCount
character(len=ESMF_MAXSTR) :: attName, fldName
integer :: varival
- real(4) :: varr4val, scale_fact, offset, dataMin, dataMax
+ real(4) :: varr4val, dataMin, dataMax
real(4), allocatable, dimension(:) :: compress_err
real(8) :: varr8val
character(len=ESMF_MAXSTR) :: varcval
- character(128) :: time_units
-
- integer :: ncerr
+ integer :: ncerr,ierr
integer :: ncid
integer :: oldMode
- integer :: im_dimid, jm_dimid, pfull_dimid, phalf_dimid, time_dimid
- integer :: im_varid, jm_varid, lm_varid, time_varid, lon_varid, lat_varid
+ integer :: im_dimid, jm_dimid, tile_dimid, pfull_dimid, phalf_dimid, time_dimid
+ integer :: im_varid, jm_varid, tile_varid, lon_varid, lat_varid
+ integer, dimension(:), allocatable :: dimids_2d, dimids_3d
integer, dimension(:), allocatable :: varids
logical shuffle
- call ESMF_FieldBundleGet(fieldbundle, fieldCount=fieldCount, rc=rc); ESMF_ERR_RETURN(rc)
+ logical :: is_cubed_sphere
+ integer :: rank, deCount, localDeCount, dimCount, tileCount
+ integer :: my_tile, start_i, start_j
+ integer, dimension(:,:), allocatable :: minIndexPDe, maxIndexPDe
+ integer, dimension(:,:), allocatable :: minIndexPTile, maxIndexPTile
+ integer, dimension(:), allocatable :: deToTileMap, localDeToDeMap
+ logical :: do_io
+ integer :: par_access
+!
+ is_cubed_sphere = .false.
+ tileCount = 0
+ my_tile = 0
+ start_i = -10000000
+ start_j = -10000000
+
+ par = use_parallel_netcdf
+ do_io = par .or. (mype==0)
+
+ call ESMF_FieldBundleGet(wrtfb, fieldCount=fieldCount, rc=rc); ESMF_ERR_RETURN(rc)
allocate(compress_err(fieldCount)); compress_err=-999.
allocate(fldlev(fieldCount)) ; fldlev = 0
allocate(fcstField(fieldCount))
allocate(varids(fieldCount))
- call ESMF_FieldBundleGet(fieldbundle, fieldList=fcstField, grid=wrtGrid, &
+ call ESMF_FieldBundleGet(wrtfb, fieldList=fcstField, grid=wrtGrid, &
! itemorderflag=ESMF_ITEMORDER_ADDORDER, &
rc=rc); ESMF_ERR_RETURN(rc)
call ESMF_GridGet(wrtgrid, dimCount=gridDimCount, rc=rc); ESMF_ERR_RETURN(rc)
do i=1,fieldCount
- call ESMF_FieldGet(fcstField(i), dimCount=fieldDimCount, rc=rc); ESMF_ERR_RETURN(rc)
+ call ESMF_FieldGet(fcstField(i), dimCount=fieldDimCount, array=array, rc=rc); ESMF_ERR_RETURN(rc)
+
if (fieldDimCount > 3) then
write(0,*)"write_netcdf: Only 2D and 3D fields are supported!"
- stop
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
end if
+
+ ! use first field to determine tile number, grid size, start index etc.
+ if (i == 1) then
+ call ESMF_ArrayGet(array, &
+ distgrid=distgrid, &
+ dimCount=dimCount, &
+ deCount=deCount, &
+ localDeCount=localDeCount, &
+ tileCount=tileCount, &
+ rc=rc); ESMF_ERR_RETURN(rc)
+
+ allocate(minIndexPDe(dimCount,deCount))
+ allocate(maxIndexPDe(dimCount,deCount))
+ allocate(minIndexPTile(dimCount, tileCount))
+ allocate(maxIndexPTile(dimCount, tileCount))
+ call ESMF_DistGridGet(distgrid, &
+ minIndexPDe=minIndexPDe, maxIndexPDe=maxIndexPDe, &
+ minIndexPTile=minIndexPTile, maxIndexPTile=maxIndexPTile, &
+ rc=rc); ESMF_ERR_RETURN(rc)
+
+ allocate(deToTileMap(deCount))
+ allocate(localDeToDeMap(localDeCount))
+ call ESMF_ArrayGet(array, &
+ deToTileMap=deToTileMap, &
+ localDeToDeMap=localDeToDeMap, &
+ rc=rc); ESMF_ERR_RETURN(rc)
+
+ is_cubed_sphere = (tileCount == 6)
+ my_tile = deToTileMap(localDeToDeMap(1)+1)
+ im = maxIndexPTile(1,1)
+ jm = maxIndexPTile(2,1)
+ start_i = minIndexPDe(1,localDeToDeMap(1)+1)
+ start_j = minIndexPDe(2,localDeToDeMap(1)+1)
+ if (.not. par) then
+ start_i = 1
+ start_j = 1
+ end if
+ end if
+
if (fieldDimCount > gridDimCount) then
allocate(ungriddedLBound(fieldDimCount-gridDimCount))
allocate(ungriddedUBound(fieldDimCount-gridDimCount))
@@ -104,301 +180,559 @@ subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, ic
lm = maxval(fldlev(:))
- allocate(arrayr4(im,jm))
- allocate(arrayr8(im,jm))
- allocate(arrayr4_3d(im,jm,lm),arrayr4_3d_save(im,jm,lm))
- allocate(arrayr8_3d(im,jm,lm))
-
-! create netcdf file and enter define mode
- if (mype==0) then
-
- ncerr = nf90_create(trim(filename),&
- cmode=IOR(IOR(NF90_CLOBBER,NF90_NETCDF4),NF90_CLASSIC_MODEL),&
- ncid=ncid); NC_ERR_STOP(ncerr)
- ncerr = nf90_set_fill(ncid, NF90_NOFILL, oldMode); NC_ERR_STOP(ncerr)
-
- ! define dimensions
- ncerr = nf90_def_dim(ncid, "grid_xt", im, im_dimid); NC_ERR_STOP(ncerr)
- ncerr = nf90_def_dim(ncid, "grid_yt", jm, jm_dimid); NC_ERR_STOP(ncerr)
- ! define coordinate variables
- ncerr = nf90_def_var(ncid, "grid_xt", NF90_DOUBLE, im_dimid, im_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_def_var(ncid, "lon", NF90_DOUBLE, (/im_dimid,jm_dimid/), lon_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, lon_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, lon_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, im_varid, "cartesian_axis", "X"); NC_ERR_STOP(ncerr)
- ncerr = nf90_def_var(ncid, "grid_yt", NF90_DOUBLE, jm_dimid, jm_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_def_var(ncid, "lat", NF90_DOUBLE, (/im_dimid,jm_dimid/), lat_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, lat_varid, "long_name", "T-cell latitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, lat_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "cartesian_axis", "Y"); NC_ERR_STOP(ncerr)
-
- if (lm > 1) then
- call add_dim(ncid, "pfull", pfull_dimid, wrtgrid, rc)
- call add_dim(ncid, "phalf", phalf_dimid, wrtgrid, rc)
+ ! for serial output allocate 'global' arrays
+ if (.not. par) then
+ allocate(array_r4(im,jm))
+ allocate(array_r8(im,jm))
+ allocate(array_r4_3d(im,jm,lm))
+ allocate(array_r8_3d(im,jm,lm))
+ if (is_cubed_sphere) then
+ allocate(array_r4_cube(im,jm,tileCount))
+ allocate(array_r8_cube(im,jm,tileCount))
+ allocate(array_r4_3d_cube(im,jm,lm,tileCount))
+ allocate(array_r8_3d_cube(im,jm,lm,tileCount))
+ end if
end if
- call add_dim(ncid, "time", time_dimid, wrtgrid, rc)
+ ! create netcdf file and enter define mode
+ if (do_io) then
+
+ if (par) then
+ ncerr = nf90_create(trim(filename),&
+ cmode=IOR(IOR(NF90_CLOBBER,NF90_NETCDF4),NF90_CLASSIC_MODEL),&
+ comm=mpi_comm, info = MPI_INFO_NULL, ncid=ncid); NC_ERR_STOP(ncerr)
+ else
+ ncerr = nf90_create(trim(filename),&
+ cmode=IOR(IOR(NF90_CLOBBER,NF90_NETCDF4),NF90_CLASSIC_MODEL),&
+ ncid=ncid); NC_ERR_STOP(ncerr)
+ end if
- call get_global_attr(wrtfb, ncid, rc)
+ ! disable auto filling.
+ ncerr = nf90_set_fill(ncid, NF90_NOFILL, oldMode); NC_ERR_STOP(ncerr)
- do i=1, fieldCount
- call ESMF_FieldGet(fcstField(i), name=fldName, typekind=typekind, rc=rc); ESMF_ERR_RETURN(rc)
-
- ! define variables
- if (fldlev(i) == 1) then
- if (typekind == ESMF_TYPEKIND_R4) then
- if (ideflate > 0) then
- if (ichunk2d < 0 .or. jchunk2d < 0) then
- ! let netcdf lib choose chunksize
- ! shuffle filter on for 2d fields (lossless compression)
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,time_dimid/), varids(i), &
- shuffle=.true.,deflate_level=ideflate); NC_ERR_STOP(ncerr)
- else
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,time_dimid/), varids(i), &
- shuffle=.true.,deflate_level=ideflate,&
- chunksizes=(/ichunk2d,jchunk2d,1/),cache_size=40*im*jm); NC_ERR_STOP(ncerr)
- endif
- else
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr)
- endif
- else if (typekind == ESMF_TYPEKIND_R8) then
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, &
- (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr)
- else
- write(0,*)'Unsupported typekind ', typekind
- stop
- end if
- else if (fldlev(i) > 1) then
- if (typekind == ESMF_TYPEKIND_R4) then
- if (ideflate > 0) then
- ! shuffle filter off for 3d fields using lossy compression
- if (nbits > 0) then
- shuffle=.false.
+       ! define dimensions [grid_xt, grid_yt, (pfull/phalf), (tile), time]
+ ncerr = nf90_def_dim(ncid, "grid_xt", im, im_dimid); NC_ERR_STOP(ncerr)
+ ncerr = nf90_def_dim(ncid, "grid_yt", jm, jm_dimid); NC_ERR_STOP(ncerr)
+ if (lm > 1) then
+ call add_dim(ncid, "pfull", pfull_dimid, wrtgrid, rc)
+ call add_dim(ncid, "phalf", phalf_dimid, wrtgrid, rc)
+ end if
+ if (is_cubed_sphere) then
+ ncerr = nf90_def_dim(ncid, "tile", tileCount, tile_dimid); NC_ERR_STOP(ncerr)
+ end if
+ call add_dim(ncid, "time", time_dimid, wrtgrid, rc)
+
+ ! define coordinate variables
+ ncerr = nf90_def_var(ncid, "grid_xt", NF90_DOUBLE, im_dimid, im_varid); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, im_varid, "cartesian_axis", "X"); NC_ERR_STOP(ncerr)
+ ncerr = nf90_def_var(ncid, "grid_yt", NF90_DOUBLE, jm_dimid, jm_varid); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, jm_varid, "cartesian_axis", "Y"); NC_ERR_STOP(ncerr)
+ if (is_cubed_sphere) then
+ ncerr = nf90_def_var(ncid, "tile", NF90_INT, tile_dimid, tile_varid); NC_ERR_STOP(ncerr)
+          ncerr = nf90_put_att(ncid, tile_varid, "long_name", "cubed-sphere face"); NC_ERR_STOP(ncerr)
+ end if
+
+ ! coordinate variable attributes based on output_grid type
+ if (trim(output_grid(grid_id)) == 'gaussian_grid' .or. &
+ trim(output_grid(grid_id)) == 'global_latlon' .or. &
+ trim(output_grid(grid_id)) == 'regional_latlon') then
+ ncerr = nf90_put_att(ncid, im_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, im_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr)
+        ncerr = nf90_put_att(ncid, jm_varid, "long_name", "T-cell latitude"); NC_ERR_STOP(ncerr)
+        ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr)
+      else if (trim(output_grid(grid_id)) == 'rotated_latlon') then
+        ncerr = nf90_put_att(ncid, im_varid, "long_name", "rotated T-cell longitude"); NC_ERR_STOP(ncerr)
+        ncerr = nf90_put_att(ncid, im_varid, "units", "degrees"); NC_ERR_STOP(ncerr)
+        ncerr = nf90_put_att(ncid, jm_varid, "long_name", "rotated T-cell latitude"); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees"); NC_ERR_STOP(ncerr)
+ else if (trim(output_grid(grid_id)) == 'lambert_conformal') then
+ ncerr = nf90_put_att(ncid, im_varid, "long_name", "x-coordinate of projection"); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, im_varid, "units", "meters"); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, jm_varid, "long_name", "y-coordinate of projection"); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, jm_varid, "units", "meters"); NC_ERR_STOP(ncerr)
+ end if
+
+ ! define longitude variable
+ if (is_cubed_sphere) then
+ ncerr = nf90_def_var(ncid, "lon", NF90_DOUBLE, [im_dimid,jm_dimid,tile_dimid], lon_varid); NC_ERR_STOP(ncerr)
+ else
+ ncerr = nf90_def_var(ncid, "lon", NF90_DOUBLE, [im_dimid,jm_dimid ], lon_varid); NC_ERR_STOP(ncerr)
+ end if
+ ncerr = nf90_put_att(ncid, lon_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, lon_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr)
+
+ ! define latitude variable
+ if (is_cubed_sphere) then
+ ncerr = nf90_def_var(ncid, "lat", NF90_DOUBLE, [im_dimid,jm_dimid,tile_dimid], lat_varid); NC_ERR_STOP(ncerr)
+ else
+ ncerr = nf90_def_var(ncid, "lat", NF90_DOUBLE, [im_dimid,jm_dimid ], lat_varid); NC_ERR_STOP(ncerr)
+ end if
+ ncerr = nf90_put_att(ncid, lat_varid, "long_name", "T-cell latitude"); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, lat_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr)
+
+ if (par) then
+ ncerr = nf90_var_par_access(ncid, im_varid, NF90_INDEPENDENT); NC_ERR_STOP(ncerr)
+ ncerr = nf90_var_par_access(ncid, lon_varid, NF90_INDEPENDENT); NC_ERR_STOP(ncerr)
+ ncerr = nf90_var_par_access(ncid, jm_varid, NF90_INDEPENDENT); NC_ERR_STOP(ncerr)
+ ncerr = nf90_var_par_access(ncid, lat_varid, NF90_INDEPENDENT); NC_ERR_STOP(ncerr)
+ if (is_cubed_sphere) then
+ ncerr = nf90_var_par_access(ncid, tile_varid, NF90_INDEPENDENT); NC_ERR_STOP(ncerr)
+ end if
+ end if
+
+
+ call get_global_attr(wrtfb, ncid, rc)
+
+
+ ! define variables (fields)
+ if (is_cubed_sphere) then
+ allocate(dimids_2d(4))
+ allocate(dimids_3d(5))
+ dimids_2d = [im_dimid,jm_dimid, tile_dimid,time_dimid]
+ if (lm > 1) dimids_3d = [im_dimid,jm_dimid,pfull_dimid,tile_dimid,time_dimid]
+ else
+ allocate(dimids_2d(3))
+ allocate(dimids_3d(4))
+ dimids_2d = [im_dimid,jm_dimid, time_dimid]
+ if (lm > 1) dimids_3d = [im_dimid,jm_dimid,pfull_dimid, time_dimid]
+ end if
+
+ do i=1, fieldCount
+ call ESMF_FieldGet(fcstField(i), name=fldName, rank=rank, typekind=typekind, rc=rc); ESMF_ERR_RETURN(rc)
+
+ par_access = NF90_INDEPENDENT
+ ! define variables
+ if (rank == 2) then
+ if (typekind == ESMF_TYPEKIND_R4) then
+ if (ideflate(grid_id) > 0) then
+ if (ichunk2d(grid_id) < 0 .or. jchunk2d(grid_id) < 0) then
+ ! let netcdf lib choose chunksize
+ ! shuffle filter on for 2d fields (lossless compression)
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
+ dimids_2d, varids(i), &
+ shuffle=.true.,deflate_level=ideflate(grid_id)); NC_ERR_STOP(ncerr)
+ else
+ if (is_cubed_sphere) then
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
+ dimids_2d, varids(i), &
+ shuffle=.true.,deflate_level=ideflate(grid_id),&
+ chunksizes=[ichunk2d(grid_id),jchunk2d(grid_id),tileCount,1]); NC_ERR_STOP(ncerr)
+ else
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
+ dimids_2d, varids(i), &
+ shuffle=.true.,deflate_level=ideflate(grid_id),&
+ chunksizes=[ichunk2d(grid_id),jchunk2d(grid_id), 1]); NC_ERR_STOP(ncerr)
+ end if
+ end if
+ ! compression filters require collective access.
+ par_access = NF90_COLLECTIVE
else
- shuffle=.true.
- endif
- if (ichunk3d < 0 .or. jchunk3d < 0 .or. kchunk3d < 0) then
- ! let netcdf lib choose chunksize
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), &
- shuffle=shuffle,deflate_level=ideflate); NC_ERR_STOP(ncerr)
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
+ dimids_2d, varids(i)); NC_ERR_STOP(ncerr)
+ end if
+ else if (typekind == ESMF_TYPEKIND_R8) then
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, &
+ dimids_2d, varids(i)); NC_ERR_STOP(ncerr)
+ else
+ write(0,*)'Unsupported typekind ', typekind
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ end if
+ else if (rank == 3) then
+ if (typekind == ESMF_TYPEKIND_R4) then
+ if (ideflate(grid_id) > 0) then
+ ! shuffle filter off for 3d fields using lossy compression
+ if (nbits(grid_id) > 0) then
+ shuffle=.false.
+ else
+ shuffle=.true.
+ end if
+ if (ichunk3d(grid_id) < 0 .or. jchunk3d(grid_id) < 0 .or. kchunk3d(grid_id) < 0) then
+ ! let netcdf lib choose chunksize
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
+ dimids_3d, varids(i), &
+ shuffle=shuffle,deflate_level=ideflate(grid_id)); NC_ERR_STOP(ncerr)
+ else
+ if (is_cubed_sphere) then
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
+ dimids_3d, varids(i), &
+ shuffle=shuffle,deflate_level=ideflate(grid_id),&
+ chunksizes=[ichunk3d(grid_id),jchunk3d(grid_id),kchunk3d(grid_id),tileCount,1]); NC_ERR_STOP(ncerr)
+ else
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
+ dimids_3d, varids(i), &
+ shuffle=shuffle,deflate_level=ideflate(grid_id),&
+ chunksizes=[ichunk3d(grid_id),jchunk3d(grid_id),kchunk3d(grid_id), 1]); NC_ERR_STOP(ncerr)
+ end if
+ end if
+ ! compression filters require collective access.
+ par_access = NF90_COLLECTIVE
else
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), &
- shuffle=shuffle,deflate_level=ideflate,&
- chunksizes=(/ichunk3d,jchunk3d,kchunk3d,1/)); NC_ERR_STOP(ncerr)
- endif
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
+ dimids_3d, varids(i)); NC_ERR_STOP(ncerr)
+ end if
+ else if (typekind == ESMF_TYPEKIND_R8) then
+ ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, &
+ dimids_3d, varids(i)); NC_ERR_STOP(ncerr)
else
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr)
- endif
- else if (typekind == ESMF_TYPEKIND_R8) then
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, &
- (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr)
- else
- write(0,*)'Unsupported typekind ', typekind
- stop
- end if
- end if
-
- ! define variable attributes
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, Count=attcount, &
- rc=rc); ESMF_ERR_RETURN(rc)
-
- do j=1,attCount
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, attributeIndex=j, &
- name=attName, typekind=attTypeKind, itemCount=n, &
- rc=rc); ESMF_ERR_RETURN(rc)
+ write(0,*)'Unsupported typekind ', typekind
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ end if
+ else
+ write(0,*)'Unsupported rank ', rank
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ end if
+ if (par) then
+ ncerr = nf90_var_par_access(ncid, varids(i), par_access); NC_ERR_STOP(ncerr)
+ end if
- if ( index(trim(attName),"ESMF") /= 0 ) then
- cycle
- endif
+ ! define variable attributes
+ call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
+ attnestflag=ESMF_ATTNEST_OFF, count=attCount, &
+ rc=rc); ESMF_ERR_RETURN(rc)
- if (attTypeKind==ESMF_TYPEKIND_I4) then
+ do j=1,attCount
call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varival, &
+ attnestflag=ESMF_ATTNEST_OFF, attributeIndex=j, &
+ name=attName, typekind=attTypeKind, &
rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varids(i), trim(attName), varival); NC_ERR_STOP(ncerr)
- else if (attTypeKind==ESMF_TYPEKIND_R4) then
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varr4val, &
- rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr4val); NC_ERR_STOP(ncerr)
+ if (index(trim(attName),"ESMF") /= 0) then
+ cycle
+ end if
- else if (attTypeKind==ESMF_TYPEKIND_R8) then
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varr8val, &
- rc=rc); ESMF_ERR_RETURN(rc)
- if (trim(attName) /= '_FillValue') then
- ! FIXME: _FillValue must be cast to var type for recent versions of netcdf
- ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr8val); NC_ERR_STOP(ncerr)
- endif
+ if (attTypeKind==ESMF_TYPEKIND_I4) then
+ call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
+ name=trim(attName), value=varival, &
+ rc=rc); ESMF_ERR_RETURN(rc)
+ ncerr = nf90_put_att(ncid, varids(i), trim(attName), varival); NC_ERR_STOP(ncerr)
+
+ else if (attTypeKind==ESMF_TYPEKIND_R4) then
+ call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
+ name=trim(attName), value=varr4val, &
+ rc=rc); ESMF_ERR_RETURN(rc)
+ ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr4val); NC_ERR_STOP(ncerr)
+
+ else if (attTypeKind==ESMF_TYPEKIND_R8) then
+ call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
+ name=trim(attName), value=varr8val, &
+ rc=rc); ESMF_ERR_RETURN(rc)
+ if (trim(attName) /= '_FillValue') then
+ ! FIXME: _FillValue must be cast to var type when using NF90_NETCDF4
+ ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr8val); NC_ERR_STOP(ncerr)
+ end if
+
+ else if (attTypeKind==ESMF_TYPEKIND_CHARACTER) then
+ call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
+ name=trim(attName), value=varcval, &
+ rc=rc); ESMF_ERR_RETURN(rc)
+ ncerr = nf90_put_att(ncid, varids(i), trim(attName), trim(varcval)); NC_ERR_STOP(ncerr)
- else if (attTypeKind==ESMF_TYPEKIND_CHARACTER) then
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varcval, &
- rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varids(i), trim(attName), trim(varcval)); NC_ERR_STOP(ncerr)
-
- end if
-
- end do ! j=1,attCount
-
- end do ! i=1,fieldCount
-
- ! write grid_xt, grid_yt attributes
- if (trim(output_grid) == 'gaussian_grid' .or. &
- trim(output_grid) == 'global_latlon' .or. &
- trim(output_grid) == 'regional_latlon') then
- ncerr = nf90_put_att(ncid, im_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, im_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "long_name", "T-cell latiitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'rotated_latlon') then
- ncerr = nf90_put_att(ncid, im_varid, "long_name", "rotated T-cell longiitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, im_varid, "units", "degrees"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "long_name", "rotated T-cell latiitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees"); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'lambert_conformal') then
- ncerr = nf90_put_att(ncid, im_varid, "long_name", "x-coordinate of projection"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, im_varid, "units", "meters"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "long_name", "y-coordinate of projection"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "units", "meters"); NC_ERR_STOP(ncerr)
- endif
-
- ncerr = nf90_enddef(ncid); NC_ERR_STOP(ncerr)
+ end if
+
+ end do ! j=1,attCount
+
+ if (is_cubed_sphere) then
+ ncerr = nf90_put_att(ncid, varids(i), 'coordinates', 'lon lat'); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_att(ncid, varids(i), 'grid_mapping', 'cubed_sphere'); NC_ERR_STOP(ncerr)
+ end if
+
+ end do ! i=1,fieldCount
+
+ ncerr = nf90_enddef(ncid); NC_ERR_STOP(ncerr)
+ end if
+ ! end of define mode
+
+ !
+ ! write dimension variables and lon,lat variables
+ !
+ if (allocated(start_idx)) deallocate(start_idx)
+ if (is_cubed_sphere) then
+ allocate(start_idx(3))
+ start_idx = [start_i, start_j, my_tile]
+ else
+ allocate(start_idx(2))
+ start_idx = [start_i, start_j]
+ end if
+
+ ! write lon (lon_varid)
+ if (par) then
+ call ESMF_GridGetCoord(wrtGrid, coordDim=1, farrayPtr=array_r8, rc=rc); ESMF_ERR_RETURN(rc)
+ ncerr = nf90_put_var(ncid, lon_varid, values=array_r8, start=start_idx); NC_ERR_STOP(ncerr)
+ else
+ call ESMF_GridGetCoord(wrtGrid, coordDim=1, array=array, rc=rc); ESMF_ERR_RETURN(rc)
+ if (is_cubed_sphere) then
+ do t=1,tileCount
+ call ESMF_ArrayGather(array, array_r8_cube(:,:,t), rootPet=0, tile=t, rc=rc); ESMF_ERR_RETURN(rc)
+ end do
+ if (do_io) then
+ ncerr = nf90_put_var(ncid, lon_varid, values=array_r8_cube, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ else
+ call ESMF_ArrayGather(array, array_r8, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
+ if (do_io) then
+ ncerr = nf90_put_var(ncid, lon_varid, values=array_r8, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ endif
end if
-! end of define mode
-
- ! write grid_xt, grid_yt values
- call ESMF_GridGetCoord(wrtGrid, coordDim=1, array=array, rc=rc); ESMF_ERR_RETURN(rc)
- call ESMF_ArrayGather(array, arrayr8, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
- if (mype==0) then
- if (trim(output_grid) == 'gaussian_grid' .or. &
- trim(output_grid) == 'global_latlon' .or. &
- trim(output_grid) == 'regional_latlon') then
- ncerr = nf90_put_var(ncid, im_varid, values=arrayr8(:,1) ); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'rotated_latlon') then
+ istart = lbound(array_r8,1); iend = ubound(array_r8,1)
+ jstart = lbound(array_r8,2); jend = ubound(array_r8,2)
+
+ ! write grid_xt (im_varid)
+ if (do_io) then
+ allocate (x(im))
+ if (trim(output_grid(grid_id)) == 'gaussian_grid' .or. &
+ trim(output_grid(grid_id)) == 'global_latlon' .or. &
+ trim(output_grid(grid_id)) == 'regional_latlon') then
+ ncerr = nf90_put_var(ncid, im_varid, values=array_r8(:,jstart), start=[istart], count=[iend-istart+1]); NC_ERR_STOP(ncerr)
+ else if (trim(output_grid(grid_id)) == 'rotated_latlon') then
+ do i=1,im
+ x(i) = lon1(grid_id) + (lon2(grid_id)-lon1(grid_id))/(im-1) * (i-1)
+ end do
+ ncerr = nf90_put_var(ncid, im_varid, values=x); NC_ERR_STOP(ncerr)
+ else if (trim(output_grid(grid_id)) == 'lambert_conformal') then
do i=1,im
- x(i) = lon1 + (lon2-lon1)/(im-1) * (i-1)
- enddo
- ncerr = nf90_put_var(ncid, im_varid, values=x ); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'lambert_conformal') then
+ x(i) = dx(grid_id) * (i-1)
+ end do
+ ncerr = nf90_put_var(ncid, im_varid, values=x); NC_ERR_STOP(ncerr)
+ else if (trim(output_grid(grid_id)) == 'cubed_sphere_grid') then
do i=1,im
- x(i) = dx * (i-1)
- enddo
- ncerr = nf90_put_var(ncid, im_varid, values=x ); NC_ERR_STOP(ncerr)
+ x(i) = i
+ end do
+ ncerr = nf90_put_var(ncid, im_varid, values=x); NC_ERR_STOP(ncerr)
+ else
+ write(0,*)'unknown output_grid ', trim(output_grid(grid_id))
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ end if
+ end if
+
+ ! write lat (lat_varid)
+ if (par) then
+ call ESMF_GridGetCoord(wrtGrid, coordDim=2, farrayPtr=array_r8, rc=rc); ESMF_ERR_RETURN(rc)
+ ncerr = nf90_put_var(ncid, lat_varid, values=array_r8, start=start_idx); NC_ERR_STOP(ncerr)
+ else
+ call ESMF_GridGetCoord(wrtGrid, coordDim=2, array=array, rc=rc); ESMF_ERR_RETURN(rc)
+ if (is_cubed_sphere) then
+ do t=1,tileCount
+ call ESMF_ArrayGather(array, array_r8_cube(:,:,t), rootPet=0, tile=t, rc=rc); ESMF_ERR_RETURN(rc)
+ end do
+ if (do_io) then
+ ncerr = nf90_put_var(ncid, lat_varid, values=array_r8_cube, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ else
+ call ESMF_ArrayGather(array, array_r8, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
+ if (do_io) then
+ ncerr = nf90_put_var(ncid, lat_varid, values=array_r8, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
endif
- ncerr = nf90_put_var(ncid, lon_varid, values=arrayr8 ); NC_ERR_STOP(ncerr)
- endif
-
- call ESMF_GridGetCoord(wrtGrid, coordDim=2, array=array, rc=rc); ESMF_ERR_RETURN(rc)
- call ESMF_ArrayGather(array, arrayr8, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
- if (mype==0) then
- if (trim(output_grid) == 'gaussian_grid' .or. &
- trim(output_grid) == 'global_latlon' .or. &
- trim(output_grid) == 'regional_latlon') then
- ncerr = nf90_put_var(ncid, jm_varid, values=arrayr8(1,:) ); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'rotated_latlon') then
+ end if
+
+ ! write grid_yt (jm_varid)
+ if (do_io) then
+ allocate (y(jm))
+ if (trim(output_grid(grid_id)) == 'gaussian_grid' .or. &
+ trim(output_grid(grid_id)) == 'global_latlon' .or. &
+ trim(output_grid(grid_id)) == 'regional_latlon') then
+ ncerr = nf90_put_var(ncid, jm_varid, values=array_r8(istart,:), start=[jstart], count=[jend-jstart+1]); NC_ERR_STOP(ncerr)
+ else if (trim(output_grid(grid_id)) == 'rotated_latlon') then
do j=1,jm
- y(j) = lat1 + (lat2-lat1)/(jm-1) * (j-1)
- enddo
- ncerr = nf90_put_var(ncid, jm_varid, values=y ); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'lambert_conformal') then
+ y(j) = lat1(grid_id) + (lat2(grid_id)-lat1(grid_id))/(jm-1) * (j-1)
+ end do
+ ncerr = nf90_put_var(ncid, jm_varid, values=y); NC_ERR_STOP(ncerr)
+ else if (trim(output_grid(grid_id)) == 'lambert_conformal') then
do j=1,jm
- y(j) = dy * (j-1)
- enddo
- ncerr = nf90_put_var(ncid, jm_varid, values=y ); NC_ERR_STOP(ncerr)
- endif
- ncerr = nf90_put_var(ncid, lat_varid, values=arrayr8 ); NC_ERR_STOP(ncerr)
- endif
+ y(j) = dy(grid_id) * (j-1)
+ end do
+ ncerr = nf90_put_var(ncid, jm_varid, values=y); NC_ERR_STOP(ncerr)
+ else if (trim(output_grid(grid_id)) == 'cubed_sphere_grid') then
+ do j=1,jm
+ y(j) = j
+ end do
+ ncerr = nf90_put_var(ncid, jm_varid, values=y); NC_ERR_STOP(ncerr)
+ else
+ write(0,*)'unknown output_grid ', trim(output_grid(grid_id))
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ end if
+ end if
+ ! write tile (tile_varid)
+ if (do_io .and. is_cubed_sphere) then
+ ncerr = nf90_put_var(ncid, tile_varid, values=[1,2,3,4,5,6]); NC_ERR_STOP(ncerr)
+ end if
+
+ ! write variables (fields)
do i=1, fieldCount
- call ESMF_FieldGet(fcstField(i),name=fldName,typekind=typekind, rc=rc); ESMF_ERR_RETURN(rc)
+ call ESMF_FieldGet(fcstField(i),name=fldName,rank=rank,typekind=typekind, rc=rc); ESMF_ERR_RETURN(rc)
- if (fldlev(i) == 1) then
- if (typekind == ESMF_TYPEKIND_R4) then
- call ESMF_FieldGather(fcstField(i), arrayr4, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
- if (mype==0) then
- ncerr = nf90_put_var(ncid, varids(i), values=arrayr4, start=(/1,1,1/),count=(/im,jm,1/) ); NC_ERR_STOP(ncerr)
- end if
- else if (typekind == ESMF_TYPEKIND_R8) then
- call ESMF_FieldGather(fcstField(i), arrayr8, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
- if (mype==0) then
- ncerr = nf90_put_var(ncid, varids(i), values=arrayr8, start=(/1,1,1/),count=(/im,jm,1/) ); NC_ERR_STOP(ncerr)
- end if
+ if (rank == 2) then
+
+ if (allocated(start_idx)) deallocate(start_idx)
+ if (is_cubed_sphere) then
+ allocate(start_idx(4))
+        start_idx = [start_i, start_j, my_tile, 1]
+      else
+        allocate(start_idx(3))
+        start_idx = [start_i, start_j, 1]
end if
- else if (fldlev(i) > 1) then
+
if (typekind == ESMF_TYPEKIND_R4) then
- call ESMF_FieldGather(fcstField(i), arrayr4_3d, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
- if (mype==0) then
- if (ideflate > 0 .and. nbits > 0) then
- ! Lossy compression if nbits>0.
- ! The floating point data is quantized to improve compression
- ! See doi:10.5194/gmd-10-413-2017. The method employed
- ! here is identical to the 'scaled linear packing' method in
- ! that paper, except that the data are scaling into an arbitrary
- ! range (2**nbits-1 not just 2**16-1) and are stored as
- ! re-scaled floats instead of short integers.
- ! The zlib algorithm does almost as
- ! well packing the re-scaled floats as it does the scaled
- ! integers, and this avoids the need for the client to apply the
- ! rescaling (plus it allows the ability to adjust the packing
- ! range).
- arrayr4_3d_save = arrayr4_3d
- dataMax = maxval(arrayr4_3d); dataMin = minval(arrayr4_3d)
- arrayr4_3d = quantized(arrayr4_3d_save, nbits, dataMin, dataMax)
- ! compute max abs compression error, save as a variable
- ! attribute.
- compress_err(i) = maxval(abs(arrayr4_3d_save-arrayr4_3d))
- endif
- ncerr = nf90_put_var(ncid, varids(i), values=arrayr4_3d, start=(/1,1,1/),count=(/im,jm,lm,1/) ); NC_ERR_STOP(ncerr)
- end if
+ if (par) then
+ call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=array_r4, rc=rc); ESMF_ERR_RETURN(rc)
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r4, start=start_idx); NC_ERR_STOP(ncerr)
+ else
+ if (is_cubed_sphere) then
+ call ESMF_FieldGet(fcstField(i), array=array, rc=rc); ESMF_ERR_RETURN(rc)
+ do t=1,tileCount
+ call ESMF_ArrayGather(array, array_r4_cube(:,:,t), rootPet=0, tile=t, rc=rc); ESMF_ERR_RETURN(rc)
+ end do
+ if (do_io) then
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r4_cube, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ else
+ call ESMF_FieldGather(fcstField(i), array_r4, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
+ if (do_io) then
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r4, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ end if
+ end if
else if (typekind == ESMF_TYPEKIND_R8) then
- call ESMF_FieldGather(fcstField(i), arrayr8_3d, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
- if (mype==0) then
- ncerr = nf90_put_var(ncid, varids(i), values=arrayr8_3d, start=(/1,1,1/),count=(/im,jm,lm,1/) ); NC_ERR_STOP(ncerr)
- end if
+ if (par) then
+ call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=array_r8, rc=rc); ESMF_ERR_RETURN(rc)
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r8, start=start_idx); NC_ERR_STOP(ncerr)
+ else
+ if (is_cubed_sphere) then
+ call ESMF_FieldGet(fcstField(i), array=array, rc=rc); ESMF_ERR_RETURN(rc)
+ do t=1,tileCount
+ call ESMF_ArrayGather(array, array_r8_cube(:,:,t), rootPet=0, tile=t, rc=rc); ESMF_ERR_RETURN(rc)
+ end do
+ if (do_io) then
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r8_cube, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ else
+ call ESMF_FieldGather(fcstField(i), array_r8, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
+ if (do_io) then
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r8, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ end if
+ end if
end if
- end if
+ else if (rank == 3) then
- end do
+ if (allocated(start_idx)) deallocate(start_idx)
+ if (is_cubed_sphere) then
+ allocate(start_idx(5))
+          start_idx = [start_i, start_j, 1, my_tile, 1]
+        else
+          allocate(start_idx(4))
+          start_idx = [start_i, start_j, 1, 1]
+ end if
- if (ideflate > 0 .and. nbits > 0 .and. mype == 0) then
+ if (typekind == ESMF_TYPEKIND_R4) then
+ if (par) then
+ call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=array_r4_3d, rc=rc); ESMF_ERR_RETURN(rc)
+ if (ideflate(grid_id) > 0 .and. nbits(grid_id) > 0) then
+ dataMax = maxval(array_r4_3d)
+ dataMin = minval(array_r4_3d)
+ call mpi_allreduce(mpi_in_place,dataMax,1,mpi_real4,mpi_max,mpi_comm,ierr)
+ call mpi_allreduce(mpi_in_place,dataMin,1,mpi_real4,mpi_min,mpi_comm,ierr)
+ call quantize_array(array_r4_3d, dataMin, dataMax, nbits(grid_id), compress_err(i))
+ call mpi_allreduce(mpi_in_place,compress_err(i),1,mpi_real4,mpi_max,mpi_comm,ierr)
+ end if
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r4_3d, start=start_idx); NC_ERR_STOP(ncerr)
+ else
+ if (is_cubed_sphere) then
+ call ESMF_FieldGet(fcstField(i), array=array, rc=rc); ESMF_ERR_RETURN(rc)
+ do t=1,tileCount
+ call ESMF_ArrayGather(array, array_r4_3d_cube(:,:,:,t), rootPet=0, tile=t, rc=rc); ESMF_ERR_RETURN(rc)
+ end do
+ if (mype==0) then
+ if (ideflate(grid_id) > 0 .and. nbits(grid_id) > 0) then
+ call quantize_array(array_r4_3d_cube, minval(array_r4_3d_cube), maxval(array_r4_3d_cube), nbits(grid_id), compress_err(i))
+ end if
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r4_3d_cube, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ else
+ call ESMF_FieldGather(fcstField(i), array_r4_3d, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
+ if (mype==0) then
+ if (ideflate(grid_id) > 0 .and. nbits(grid_id) > 0) then
+ call quantize_array(array_r4_3d, minval(array_r4_3d), maxval(array_r4_3d), nbits(grid_id), compress_err(i))
+ end if
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r4_3d, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ end if
+ end if
+ else if (typekind == ESMF_TYPEKIND_R8) then
+ if (par) then
+ call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=array_r8_3d, rc=rc); ESMF_ERR_RETURN(rc)
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r8_3d, start=start_idx); NC_ERR_STOP(ncerr)
+ else
+ if (is_cubed_sphere) then
+ call ESMF_FieldGet(fcstField(i), array=array, rc=rc); ESMF_ERR_RETURN(rc)
+ do t=1,tileCount
+ call ESMF_ArrayGather(array, array_r8_3d_cube(:,:,:,t), rootPet=0, tile=t, rc=rc); ESMF_ERR_RETURN(rc)
+ end do
+ if (mype==0) then
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r8_3d_cube, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ else
+ call ESMF_FieldGather(fcstField(i), array_r8_3d, rootPet=0, rc=rc); ESMF_ERR_RETURN(rc)
+ if (mype==0) then
+ ncerr = nf90_put_var(ncid, varids(i), values=array_r8_3d, start=start_idx); NC_ERR_STOP(ncerr)
+ end if
+ end if
+ end if
+ end if ! end typekind
+
+ else
+
+ write(0,*)'Unsupported rank ', rank
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+
+ end if ! end rank
+
+ end do ! end fieldCount
+
+ if (ideflate(grid_id) > 0 .and. nbits(grid_id) > 0 .and. do_io) then
ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr)
do i=1, fieldCount
if (compress_err(i) > 0) then
ncerr = nf90_put_att(ncid, varids(i), 'max_abs_compression_error', compress_err(i)); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, varids(i), 'nbits', nbits); NC_ERR_STOP(ncerr)
- endif
- enddo
+ ncerr = nf90_put_att(ncid, varids(i), 'nbits', nbits(grid_id)); NC_ERR_STOP(ncerr)
+ end if
+ end do
ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr)
- endif
+ end if
- deallocate(arrayr4)
- deallocate(arrayr8)
- deallocate(arrayr4_3d,arrayr4_3d_save)
- deallocate(arrayr8_3d)
+ if (.not. par) then
+ deallocate(array_r4)
+ deallocate(array_r8)
+ deallocate(array_r4_3d)
+ deallocate(array_r8_3d)
+ if (is_cubed_sphere) then
+ deallocate(array_r4_cube)
+ deallocate(array_r8_cube)
+ deallocate(array_r4_3d_cube)
+ deallocate(array_r8_3d_cube)
+ end if
+ end if
+
+ if (do_io) then
+ deallocate(dimids_2d)
+ deallocate(dimids_3d)
+ end if
deallocate(fcstField)
deallocate(varids)
deallocate(compress_err)
- if (mype==0) then
- ncerr = nf90_close(ncid=ncid); NC_ERR_STOP(ncerr)
+ if (do_io) then
+ ncerr = nf90_close(ncid=ncid); NC_ERR_STOP(ncerr)
end if
end subroutine write_netcdf
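
In the parallel ('par') branch above, each write task holds only its local slice of a 3D field, so the min/max used for quantization and the resulting compression error are reduced across the write communicator with MPI_IN_PLACE before and after the call to quantize_array. The stand-alone sketch below is not part of the patch and uses illustrative names (local_field); it only demonstrates that in-place reduction pattern.

    ! Illustrative sketch only -- not part of this patch.
    program allreduce_minmax_sketch
      use mpi
      implicit none
      integer :: ierr, mype
      real(4) :: local_field(8), dataMin, dataMax
      call mpi_init(ierr)
      call mpi_comm_rank(mpi_comm_world, mype, ierr)
      call random_number(local_field)
      local_field = local_field + mype          ! make each rank's range differ
      dataMin = minval(local_field)
      dataMax = maxval(local_field)
      ! in-place reduction: every task ends up with the global extrema, so all
      ! tasks quantize against the same range before writing their slice
      call mpi_allreduce(mpi_in_place, dataMin, 1, mpi_real4, mpi_min, mpi_comm_world, ierr)
      call mpi_allreduce(mpi_in_place, dataMax, 1, mpi_real4, mpi_max, mpi_comm_world, ierr)
      if (mype == 0) print *, 'global min/max =', dataMin, dataMax
      call mpi_finalize(ierr)
    end program allreduce_minmax_sketch
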
-!
+
!----------------------------------------------------------------------------------------
subroutine get_global_attr(fldbundle, ncid, rc)
type(ESMF_FieldBundle), intent(in) :: fldbundle
@@ -406,21 +740,19 @@ subroutine get_global_attr(fldbundle, ncid, rc)
integer, intent(out) :: rc
! local variable
- integer :: i, attcount
+ integer :: i, attCount
integer :: ncerr
character(len=ESMF_MAXSTR) :: attName
type(ESMF_TypeKind_Flag) :: typekind
integer :: varival
- real(ESMF_KIND_R4) :: varr4val
real(ESMF_KIND_R4), dimension(:), allocatable :: varr4list
- real(ESMF_KIND_R8) :: varr8val
real(ESMF_KIND_R8), dimension(:), allocatable :: varr8list
integer :: itemCount
character(len=ESMF_MAXSTR) :: varcval
!
call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, Count=attcount, &
+ attnestflag=ESMF_ATTNEST_OFF, count=attCount, &
rc=rc); ESMF_ERR_RETURN(rc)
do i=1,attCount
@@ -458,7 +790,7 @@ subroutine get_global_attr(fldbundle, ncid, rc)
end do
end subroutine get_global_attr
-!
+
!----------------------------------------------------------------------------------------
subroutine get_grid_attr(grid, prefix, ncid, varid, rc)
type(ESMF_Grid), intent(in) :: grid
@@ -468,7 +800,7 @@ subroutine get_grid_attr(grid, prefix, ncid, varid, rc)
integer, intent(out) :: rc
! local variable
- integer :: i, attcount, n, ind
+ integer :: i, attCount, n, ind
integer :: ncerr
character(len=ESMF_MAXSTR) :: attName
type(ESMF_TypeKind_Flag) :: typekind
@@ -479,16 +811,14 @@ subroutine get_grid_attr(grid, prefix, ncid, varid, rc)
character(len=ESMF_MAXSTR) :: varcval
!
call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, Count=attcount, &
+ attnestflag=ESMF_ATTNEST_OFF, count=attCount, &
rc=rc); ESMF_ERR_RETURN(rc)
- !write(0,*)'grid attcount = ', attcount
do i=1,attCount
call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
attnestflag=ESMF_ATTNEST_OFF, attributeIndex=i, name=attName, &
typekind=typekind, itemCount=n, rc=rc); ESMF_ERR_RETURN(rc)
- !write(0,*)'grid att = ',i,trim(attName), ' itemCount = ' , n
if (index(trim(attName), trim(prefix)//":")==1) then
ind = len(trim(prefix)//":")
@@ -507,10 +837,10 @@ subroutine get_grid_attr(grid, prefix, ncid, varid, rc)
call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
name=trim(attName), value=varr8val, rc=rc); ESMF_ERR_RETURN(rc)
if (trim(attName) /= '_FillValue') then
- ! FIXME: _FillValue must be cast to var type for recent versions
- ! of netcdf
+ ! FIXME: _FillValue must be cast to var type when using
+ ! NF90_NETCDF4. Until this is fixed, using netCDF default _FillValue.
ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), varr8val); NC_ERR_STOP(ncerr)
- endif
+ end if
else if (typekind==ESMF_TYPEKIND_CHARACTER) then
call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
@@ -525,6 +855,7 @@ subroutine get_grid_attr(grid, prefix, ncid, varid, rc)
end subroutine get_grid_attr
+!----------------------------------------------------------------------------------------
subroutine add_dim(ncid, dim_name, dimid, grid, rc)
integer, intent(in) :: ncid
character(len=*), intent(in) :: dim_name
@@ -533,75 +864,127 @@ subroutine add_dim(ncid, dim_name, dimid, grid, rc)
integer, intent(out) :: rc
! local variable
- integer :: i, attcount, n, dim_varid
+ integer :: n, dim_varid
integer :: ncerr
- character(len=ESMF_MAXSTR) :: attName
type(ESMF_TypeKind_Flag) :: typekind
- integer, allocatable :: valueListI(:)
real(ESMF_KIND_R4), allocatable :: valueListR4(:)
real(ESMF_KIND_R8), allocatable :: valueListR8(:)
- character(len=ESMF_MAXSTR), allocatable :: valueListC(:)
!
call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
attnestflag=ESMF_ATTNEST_OFF, name=dim_name, &
typekind=typekind, itemCount=n, rc=rc); ESMF_ERR_RETURN(rc)
- if ( trim(dim_name) == "time" ) then
- ncerr = nf90_def_dim(ncid, trim(dim_name), NF90_UNLIMITED, dimid); NC_ERR_STOP(ncerr)
+ if (trim(dim_name) == "time") then
+ ! using an unlimited dim requires collective mode (NF90_COLLECTIVE)
+      ! for parallel writes, which seems to slow things down on Hera.
+ !ncerr = nf90_def_dim(ncid, trim(dim_name), NF90_UNLIMITED, dimid); NC_ERR_STOP(ncerr)
+ ncerr = nf90_def_dim(ncid, trim(dim_name), 1, dimid); NC_ERR_STOP(ncerr)
else
ncerr = nf90_def_dim(ncid, trim(dim_name), n, dimid); NC_ERR_STOP(ncerr)
end if
if (typekind==ESMF_TYPEKIND_R8) then
- ncerr = nf90_def_var(ncid, dim_name, NF90_REAL8, dimids=(/dimid/), varid=dim_varid); NC_ERR_STOP(ncerr)
+ ncerr = nf90_def_var(ncid, dim_name, NF90_REAL8, dimids=[dimid], varid=dim_varid); NC_ERR_STOP(ncerr)
allocate(valueListR8(n))
call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
name=trim(dim_name), valueList=valueListR8, rc=rc); ESMF_ERR_RETURN(rc)
ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_var(ncid, dim_varid, values=valueListR8 ); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_var(ncid, dim_varid, values=valueListR8); NC_ERR_STOP(ncerr)
ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr)
deallocate(valueListR8)
else if (typekind==ESMF_TYPEKIND_R4) then
- ncerr = nf90_def_var(ncid, dim_name, NF90_REAL4, dimids=(/dimid/), varid=dim_varid); NC_ERR_STOP(ncerr)
+ ncerr = nf90_def_var(ncid, dim_name, NF90_REAL4, dimids=[dimid], varid=dim_varid); NC_ERR_STOP(ncerr)
allocate(valueListR4(n))
call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
name=trim(dim_name), valueList=valueListR4, rc=rc); ESMF_ERR_RETURN(rc)
ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_var(ncid, dim_varid, values=valueListR4 ); NC_ERR_STOP(ncerr)
+ ncerr = nf90_put_var(ncid, dim_varid, values=valueListR4); NC_ERR_STOP(ncerr)
ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr)
deallocate(valueListR4)
else
write(0,*)'Error in module_write_netcdf.F90(add_dim) unknown typekind for ',trim(dim_name)
call ESMF_Finalize(endflag=ESMF_END_ABORT)
end if
+ if (par) then
+ ncerr = nf90_var_par_access(ncid, dim_varid, NF90_INDEPENDENT); NC_ERR_STOP(ncerr)
+ end if
call get_grid_attr(grid, dim_name, ncid, dim_varid, rc)
end subroutine add_dim
-!
+
!----------------------------------------------------------------------------------------
- subroutine nccheck(status)
- use netcdf
- implicit none
- integer, intent (in) :: status
-
- if (status /= nf90_noerr) then
- write(0,*) status, trim(nf90_strerror(status))
- stop "stopped"
+ subroutine quantize_array_3d(array, dataMin, dataMax, nbits, compress_err)
+
+ real(4), dimension(:,:,:), intent(inout) :: array
+ real(4), intent(in) :: dataMin, dataMax
+ integer, intent(in) :: nbits
+ real(4), intent(out) :: compress_err
+
+ real(4) :: scale_fact, offset
+ real(4), dimension(:,:,:), allocatable :: array_save
+ ! Lossy compression if nbits>0.
+    ! The floating point data is quantized to improve compression.
+    ! See doi:10.5194/gmd-10-413-2017. The method employed
+    ! here is identical to the 'scaled linear packing' method in
+    ! that paper, except that the data are scaled into an arbitrary
+    ! range (2**nbits-1, not just 2**16-1) and are stored as
+ ! re-scaled floats instead of short integers.
+ ! The zlib algorithm does almost as
+ ! well packing the re-scaled floats as it does the scaled
+ ! integers, and this avoids the need for the client to apply the
+    ! rescaling (plus it allows the packing range to be adjusted).
+ scale_fact = (dataMax - dataMin) / (2**nbits-1)
+ offset = dataMin
+ if (scale_fact > 0.) then
+ allocate(array_save, source=array)
+ array = scale_fact*(nint((array_save - offset) / scale_fact)) + offset
+ ! compute max abs compression error
+ compress_err = maxval(abs(array_save-array))
+ deallocate(array_save)
+ else
+ ! field is constant
+ compress_err = 0.
end if
- end subroutine nccheck
-
- elemental real function quantized(dataIn, nbits, dataMin, dataMax)
- integer, intent(in) :: nbits
- real(4), intent(in) :: dataIn, dataMin, dataMax
- real(4) offset, scale_fact
- ! convert data to 32 bit integers in range 0 to 2**nbits-1, then cast
- ! cast back to 32 bit floats (data is then quantized in steps
- ! proportional to 2**nbits so last 32-nbits in floating
- ! point representation should be zero for efficient zlib compression).
- scale_fact = (dataMax - dataMin) / (2**nbits-1); offset = dataMin
- quantized = scale_fact*(nint((dataIn - offset) / scale_fact)) + offset
- end function quantized
+ end subroutine quantize_array_3d
+
+ subroutine quantize_array_4d(array, dataMin, dataMax, nbits, compress_err)
+
+ real(4), dimension(:,:,:,:), intent(inout) :: array
+ real(4), intent(in) :: dataMin, dataMax
+ integer, intent(in) :: nbits
+ real(4), intent(out) :: compress_err
+
+ real(4) :: scale_fact, offset
+ real(4), dimension(:,:,:,:), allocatable :: array_save
+
+ ! Lossy compression if nbits>0.
+    ! The floating point data is quantized to improve compression.
+    ! See doi:10.5194/gmd-10-413-2017. The method employed
+    ! here is identical to the 'scaled linear packing' method in
+    ! that paper, except that the data are scaled into an arbitrary
+    ! range (2**nbits-1, not just 2**16-1) and are stored as
+ ! re-scaled floats instead of short integers.
+ ! The zlib algorithm does almost as
+ ! well packing the re-scaled floats as it does the scaled
+ ! integers, and this avoids the need for the client to apply the
+    ! rescaling (plus it allows the packing range to be adjusted).
+ scale_fact = (dataMax - dataMin) / (2**nbits-1)
+ offset = dataMin
+ if (scale_fact > 0.) then
+ allocate(array_save, source=array)
+ array = scale_fact*(nint((array_save - offset) / scale_fact)) + offset
+ ! compute max abs compression error
+ compress_err = maxval(abs(array_save-array))
+ deallocate(array_save)
+ else
+ ! field is constant
+ compress_err = 0.
+ end if
+ end subroutine quantize_array_4d
+!----------------------------------------------------------------------------------------
end module module_write_netcdf
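
For reference, quantize_array_3d/quantize_array_4d above snap each value to one of 2**nbits levels spanning the field's range and store the result back as 32-bit floats, so the worst-case error is half the quantization step. The stand-alone sketch below is not part of the patch; the field values and nbits are illustrative.

    ! Illustrative sketch only -- not part of this patch.
    program quantize_sketch
      implicit none
      integer, parameter :: nbits = 14
      real(4) :: field(5), field_save(5), scale_fact, offset
      field = [287.3, 291.8, 284.1, 289.9, 286.0]   ! illustrative values
      field_save = field
      offset = minval(field)
      scale_fact = (maxval(field) - offset) / (2**nbits - 1)
      ! snap every value to one of 2**nbits levels, then store back as floats;
      ! the zeroed low-order mantissa bits are what zlib compresses well
      field = scale_fact * nint((field - offset) / scale_fact) + offset
      print *, 'max_abs_compression_error  =', maxval(abs(field - field_save))
      print *, 'upper bound (scale_fact/2) =', scale_fact/2
    end program quantize_sketch
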
diff --git a/io/module_write_netcdf_parallel.F90 b/io/module_write_netcdf_parallel.F90
deleted file mode 100644
index 0506d794a..000000000
--- a/io/module_write_netcdf_parallel.F90
+++ /dev/null
@@ -1,627 +0,0 @@
-#define ESMF_ERR_RETURN(rc) if (ESMF_LogFoundError(rc, msg="Breaking out of subroutine", line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT)
-
-#define NC_ERR_STOP(status) \
- if (status /= nf90_noerr) write(0,*) "line ", __LINE__, trim(nf90_strerror(status)); \
- if (status /= nf90_noerr) call ESMF_Finalize(endflag=ESMF_END_ABORT)
-
-module module_write_netcdf_parallel
-
- use esmf
- use netcdf
- use module_fv3_io_def,only : ideflate, nbits, &
- output_grid,dx,dy,lon1,lat1,lon2,lat2
- use mpi
-
- implicit none
- private
- public write_netcdf_parallel
-
- contains
-
-#ifdef NO_PARALLEL_NETCDF
-!----------------------------------------------------------------------------------------
- subroutine write_netcdf_parallel(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, ichunk2d, jchunk2d, ichunk3d, jchunk3d, kchunk3d, rc)
- type(ESMF_FieldBundle), intent(in) :: fieldbundle
- type(ESMF_FieldBundle), intent(in) :: wrtfb
- character(*), intent(in) :: filename
- integer, intent(in) :: mpi_comm
- integer, intent(in) :: mype
- integer, intent(in) :: im, jm, ichunk2d, jchunk2d, &
- ichunk3d, jchunk3d, kchunk3d
- integer, optional,intent(out) :: rc
- print *,'in stub write_netcdf_parallel - model not built with parallel netcdf support, return'
- end subroutine write_netcdf_parallel
-#else
-!----------------------------------------------------------------------------------------
- subroutine write_netcdf_parallel(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, ichunk2d, jchunk2d, ichunk3d, jchunk3d, kchunk3d, rc)
-!
- type(ESMF_FieldBundle), intent(in) :: fieldbundle
- type(ESMF_FieldBundle), intent(in) :: wrtfb
- character(*), intent(in) :: filename
- integer, intent(in) :: mpi_comm
- integer, intent(in) :: mype
- integer, intent(in) :: im, jm, ichunk2d, jchunk2d, &
- ichunk3d, jchunk3d, kchunk3d
- integer, optional,intent(out) :: rc
-!
-!** local vars
- integer :: i,j,m,n,k,istart,iend,jstart,jend,i1,i2,j1,j2,k1,k2
- integer :: lm
-
- integer, dimension(:), allocatable :: fldlev
- real(ESMF_KIND_R4), dimension(:,:), pointer :: arrayr4
- real(ESMF_KIND_R8), dimension(:,:), pointer :: arrayr8
- real(ESMF_KIND_R4), dimension(:,:,:), pointer :: arrayr4_3d,arrayr4_3d_save
- real(ESMF_KIND_R8), dimension(:,:,:), pointer :: arrayr8_3d
-
- real(8) x(im),y(jm)
- integer :: fieldCount, fieldDimCount, gridDimCount
- integer, dimension(:), allocatable :: ungriddedLBound, ungriddedUBound
-
- type(ESMF_Field), allocatable :: fcstField(:)
- type(ESMF_TypeKind_Flag) :: typekind
- type(ESMF_TypeKind_Flag) :: attTypeKind
- type(ESMF_Grid) :: wrtgrid
- type(ESMF_Array) :: array
-
- integer :: attcount
- character(len=ESMF_MAXSTR) :: attName, fldName
- integer :: totalLBound2d(2),totalUBound2d(2),totalLBound3d(3),totalUBound3d(3)
-
- integer :: varival
- real(4) :: varr4val, scale_fact, offset, dataMin, dataMax
- real(4), allocatable, dimension(:) :: compress_err
- real(8) :: varr8val
- character(len=ESMF_MAXSTR) :: varcval
-
- character(128) :: time_units
-
- integer :: ncerr,ierr
- integer :: ncid
- integer :: oldMode
- integer :: im_dimid, jm_dimid, pfull_dimid, phalf_dimid, time_dimid
- integer :: im_varid, jm_varid, lm_varid, time_varid, lon_varid, lat_varid
- integer, dimension(:), allocatable :: varids
- logical shuffle
-!
- call ESMF_FieldBundleGet(fieldbundle, fieldCount=fieldCount, rc=rc); ESMF_ERR_RETURN(rc)
-
- allocate(compress_err(fieldCount)); compress_err=-999.
- allocate(fldlev(fieldCount)) ; fldlev = 0
- allocate(fcstField(fieldCount))
- allocate(varids(fieldCount))
-
- call ESMF_FieldBundleGet(fieldbundle, fieldList=fcstField, grid=wrtGrid, &
-! itemorderflag=ESMF_ITEMORDER_ADDORDER, &
- rc=rc); ESMF_ERR_RETURN(rc)
-
- call ESMF_GridGet(wrtgrid, dimCount=gridDimCount, rc=rc); ESMF_ERR_RETURN(rc)
-
- do i=1,fieldCount
- call ESMF_FieldGet(fcstField(i), dimCount=fieldDimCount, rc=rc); ESMF_ERR_RETURN(rc)
- if (fieldDimCount > 3) then
- write(0,*)"write_netcdf: Only 2D and 3D fields are supported!"
- stop
- end if
- if (fieldDimCount > gridDimCount) then
- allocate(ungriddedLBound(fieldDimCount-gridDimCount))
- allocate(ungriddedUBound(fieldDimCount-gridDimCount))
- call ESMF_FieldGet(fcstField(i), &
- ungriddedLBound=ungriddedLBound, &
- ungriddedUBound=ungriddedUBound, rc=rc); ESMF_ERR_RETURN(rc)
- fldlev(i) = ungriddedUBound(fieldDimCount-gridDimCount) - &
- ungriddedLBound(fieldDimCount-gridDimCount) + 1
- deallocate(ungriddedLBound)
- deallocate(ungriddedUBound)
- else if (fieldDimCount == 2) then
- fldlev(i) = 1
- end if
- end do
-
- lm = maxval(fldlev(:))
-
-! create netcdf file for parallel access
-
- ncerr = nf90_create(trim(filename),&
- cmode=IOR(IOR(NF90_CLOBBER,NF90_NETCDF4),NF90_CLASSIC_MODEL),&
- comm=mpi_comm, info = MPI_INFO_NULL, ncid=ncid); NC_ERR_STOP(ncerr)
-! disable auto filling.
- ncerr = nf90_set_fill(ncid, NF90_NOFILL, oldMode); NC_ERR_STOP(ncerr)
-
- ! define dimensions
- ncerr = nf90_def_dim(ncid, "grid_xt", im, im_dimid); NC_ERR_STOP(ncerr)
- ncerr = nf90_def_dim(ncid, "grid_yt", jm, jm_dimid); NC_ERR_STOP(ncerr)
- ! define coordinate variables
- ncerr = nf90_def_var(ncid, "grid_xt", NF90_DOUBLE, im_dimid, im_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, im_varid, NF90_INDEPENDENT)
- ncerr = nf90_def_var(ncid, "lon", NF90_DOUBLE, (/im_dimid,jm_dimid/), lon_varid); NC_ERR_STOP(ncerr)
- !ncerr = nf90_var_par_access(ncid, lon_varid, NF90_INDEPENDENT)
- ncerr = nf90_put_att(ncid, lon_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, lon_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, im_varid, "cartesian_axis", "X"); NC_ERR_STOP(ncerr)
- ncerr = nf90_def_var(ncid, "grid_yt", NF90_DOUBLE, jm_dimid, jm_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, jm_varid, NF90_INDEPENDENT)
- ncerr = nf90_def_var(ncid, "lat", NF90_DOUBLE, (/im_dimid,jm_dimid/), lat_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, lat_varid, NF90_INDEPENDENT)
- ncerr = nf90_put_att(ncid, lat_varid, "long_name", "T-cell latitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, lat_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "cartesian_axis", "Y"); NC_ERR_STOP(ncerr)
-
- if (lm > 1) then
- call add_dim(ncid, "pfull", pfull_dimid, wrtgrid, rc)
- call add_dim(ncid, "phalf", phalf_dimid, wrtgrid, rc)
- end if
-
- call add_dim(ncid, "time", time_dimid, wrtgrid, rc)
-
- call get_global_attr(wrtfb, ncid, rc)
-
- do i=1, fieldCount
- call ESMF_FieldGet(fcstField(i), name=fldName, typekind=typekind, rc=rc); ESMF_ERR_RETURN(rc)
-
- ! define variables
- if (fldlev(i) == 1) then
- if (typekind == ESMF_TYPEKIND_R4) then
- if (ideflate > 0) then
- if (ichunk2d < 0 .or. jchunk2d < 0) then
- ! let netcdf lib choose chunksize
- ! shuffle filter on for 2d fields (lossless compression)
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,time_dimid/), varids(i), &
- shuffle=.true.,deflate_level=ideflate); NC_ERR_STOP(ncerr)
- else
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,time_dimid/), varids(i), &
- shuffle=.true.,deflate_level=ideflate,&
- chunksizes=(/ichunk2d,jchunk2d,1/)); NC_ERR_STOP(ncerr)
- endif
- ! compression filters require collective access.
- ncerr = nf90_var_par_access(ncid, varids(i), NF90_COLLECTIVE)
- else
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, varids(i), NF90_INDEPENDENT)
- endif
- else if (typekind == ESMF_TYPEKIND_R8) then
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, &
- (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, varids(i), NF90_INDEPENDENT)
- else
- write(0,*)'Unsupported typekind ', typekind
- stop
- end if
- else if (fldlev(i) > 1) then
- if (typekind == ESMF_TYPEKIND_R4) then
- if (ideflate > 0) then
- ! shuffle filter off for 3d fields using lossy compression
- if (nbits > 0) then
- shuffle=.false.
- else
- shuffle=.true.
- endif
- if (ichunk3d < 0 .or. jchunk3d < 0 .or. kchunk3d < 0) then
- ! let netcdf lib choose chunksize
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), &
- shuffle=shuffle,deflate_level=ideflate); NC_ERR_STOP(ncerr)
- else
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), &
- shuffle=shuffle,deflate_level=ideflate,&
- chunksizes=(/ichunk3d,jchunk3d,kchunk3d,1/)); NC_ERR_STOP(ncerr)
- endif
- ! compression filters require collective access.
- ncerr = nf90_var_par_access(ncid, varids(i), NF90_COLLECTIVE)
- else
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, &
- (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, varids(i), NF90_INDEPENDENT)
- endif
- else if (typekind == ESMF_TYPEKIND_R8) then
- ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, &
- (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, varids(i), NF90_INDEPENDENT)
- else
- write(0,*)'Unsupported typekind ', typekind
- stop
- end if
- end if
-
- ! define variable attributes
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, Count=attcount, &
- rc=rc); ESMF_ERR_RETURN(rc)
-
- do j=1,attCount
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, attributeIndex=j, &
- name=attName, typekind=attTypeKind, itemCount=n, &
- rc=rc); ESMF_ERR_RETURN(rc)
-
- if ( index(trim(attName),"ESMF") /= 0 ) then
- cycle
- endif
-
- if (attTypeKind==ESMF_TYPEKIND_I4) then
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varival, &
- rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varids(i), trim(attName), varival); NC_ERR_STOP(ncerr)
-
- else if (attTypeKind==ESMF_TYPEKIND_R4) then
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varr4val, &
- rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr4val); NC_ERR_STOP(ncerr)
-
- else if (attTypeKind==ESMF_TYPEKIND_R8) then
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varr8val, &
- rc=rc); ESMF_ERR_RETURN(rc)
- if (trim(attName) /= '_FillValue') then
- ! FIXME: _FillValue must be cast to var type when using NF90_NETCDF4
- ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr8val); NC_ERR_STOP(ncerr)
- endif
-
- else if (attTypeKind==ESMF_TYPEKIND_CHARACTER) then
- call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varcval, &
- rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varids(i), trim(attName), trim(varcval)); NC_ERR_STOP(ncerr)
-
- end if
-
- end do ! j=1,attCount
-
- end do ! i=1,fieldCount
-
- ! write grid_xt, grid_yt attributes
- if (trim(output_grid) == 'gaussian_grid' .or. &
- trim(output_grid) == 'global_latlon' .or. &
- trim(output_grid) == 'regional_latlon') then
- ncerr = nf90_put_att(ncid, im_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, im_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "long_name", "T-cell latiitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'rotated_latlon') then
- ncerr = nf90_put_att(ncid, im_varid, "long_name", "rotated T-cell longiitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, im_varid, "units", "degrees"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "long_name", "rotated T-cell latiitude"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees"); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'lambert_conformal') then
- ncerr = nf90_put_att(ncid, im_varid, "long_name", "x-coordinate of projection"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, im_varid, "units", "meters"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "long_name", "y-coordinate of projection"); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, jm_varid, "units", "meters"); NC_ERR_STOP(ncerr)
- endif
-
- ncerr = nf90_enddef(ncid); NC_ERR_STOP(ncerr)
-
-! end of define mode
-
- ! write grid_xt, grid_yt values
- call ESMF_GridGetCoord(wrtGrid, coordDim=1, farrayPtr=arrayr8, rc=rc); ESMF_ERR_RETURN(rc)
- istart = lbound(arrayr8,1); iend = ubound(arrayr8,1)
- jstart = lbound(arrayr8,2); jend = ubound(arrayr8,2)
- !print *,'in write netcdf mpi dim 1',istart,iend,jstart,jend,shape(arrayr8),minval(arrayr8(:,jstart)),maxval(arrayr8(:,jstart))
-
- if (trim(output_grid) == 'gaussian_grid' .or. &
- trim(output_grid) == 'global_latlon' .or. &
- trim(output_grid) == 'regional_latlon') then
- ncerr = nf90_put_var(ncid, im_varid, values=arrayr8(:,jstart),start=(/istart/), count=(/iend-istart+1/)); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'rotated_latlon') then
- do i=1,im
- x(i) = lon1 + (lon2-lon1)/(im-1) * (i-1)
- enddo
- ncerr = nf90_put_var(ncid, im_varid, values=x ); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'lambert_conformal') then
- do i=1,im
- x(i) = dx * (i-1)
- enddo
- ncerr = nf90_put_var(ncid, im_varid, values=x ); NC_ERR_STOP(ncerr)
- endif
- ncerr = nf90_put_var(ncid, lon_varid, values=arrayr8, start=(/istart,jstart/)); NC_ERR_STOP(ncerr)
-
- call ESMF_GridGetCoord(wrtGrid, coordDim=2, farrayPtr=arrayr8, rc=rc); ESMF_ERR_RETURN(rc)
- !print *,'in write netcdf mpi dim 2',istart,iend,jstart,jend,shape(arrayr8),minval(arrayr8(istart,:)),maxval(arrayr8(istart,:))
- if (trim(output_grid) == 'gaussian_grid' .or. &
- trim(output_grid) == 'global_latlon' .or. &
- trim(output_grid) == 'regional_latlon') then
- ncerr = nf90_put_var(ncid, jm_varid, values=arrayr8(istart,:),start=(/jstart/),count=(/jend-jstart+1/)); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'rotated_latlon') then
- do j=1,jm
- y(j) = lat1 + (lat2-lat1)/(jm-1) * (j-1)
- enddo
- ncerr = nf90_put_var(ncid, jm_varid, values=y ); NC_ERR_STOP(ncerr)
- else if (trim(output_grid) == 'lambert_conformal') then
- do j=1,jm
- y(j) = dy * (j-1)
- enddo
- ncerr = nf90_put_var(ncid, jm_varid, values=y ); NC_ERR_STOP(ncerr)
- endif
- ncerr = nf90_put_var(ncid, lat_varid, values=arrayr8, start=(/istart,jstart/)); NC_ERR_STOP(ncerr)
-
- do i=1, fieldCount
-
- call ESMF_FieldGet(fcstField(i),name=fldName,typekind=typekind, rc=rc); ESMF_ERR_RETURN(rc)
-
- if (fldlev(i) == 1) then
- if (typekind == ESMF_TYPEKIND_R4) then
- call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=arrayr4, totalLBound=totalLBound2d, totalUBound=totalUBound2d,rc=rc); ESMF_ERR_RETURN(rc)
- !print *,'field name=',trim(fldName),'bound=',totalLBound2d,'ubound=',totalUBound2d
- ncerr = nf90_put_var(ncid, varids(i), values=arrayr4, start=(/totalLBound2d(1),totalLBound2d(2),1/)); NC_ERR_STOP(ncerr)
- else if (typekind == ESMF_TYPEKIND_R8) then
- call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=arrayr8, totalLBound=totalLBound2d, totalUBound=totalUBound2d,rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_var(ncid, varids(i), values=arrayr8, start=(/totalLBound2d(1),totalLBound2d(2),1/)); NC_ERR_STOP(ncerr)
- end if
- else if (fldlev(i) > 1) then
- if (typekind == ESMF_TYPEKIND_R4) then
- call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=arrayr4_3d, totalLBound=totalLBound3d, totalUBound=totalUBound3d,rc=rc); ESMF_ERR_RETURN(rc)
- if (ideflate > 0 .and. nbits > 0) then
- i1=totalLBound3d(1);i2=totalUBound3d(1)
- j1=totalLBound3d(2);j2=totalUBound3d(2)
- k1=totalLBound3d(3);k2=totalUBound3d(3)
- dataMax = maxval(arrayr4_3d(i1:i2,j1:j2,k1:k2))
- dataMin = minval(arrayr4_3d(i1:i2,j1:j2,k1:k2))
- call mpi_allreduce(mpi_in_place,dataMax,1,mpi_real4,mpi_max,mpi_comm,ierr)
- call mpi_allreduce(mpi_in_place,dataMin,1,mpi_real4,mpi_min,mpi_comm,ierr)
- ! Lossy compression if nbits>0.
- ! The floating point data is quantized to improve compression
- ! See doi:10.5194/gmd-10-413-2017. The method employed
- ! here is identical to the 'scaled linear packing' method in
- ! that paper, except that the data are scaling into an arbitrary
- ! range (2**nbits-1 not just 2**16-1) and are stored as
- ! re-scaled floats instead of short integers.
- ! The zlib algorithm does almost as
- ! well packing the re-scaled floats as it does the scaled
- ! integers, and this avoids the need for the client to apply the
- ! rescaling (plus it allows the ability to adjust the packing
- ! range)
- scale_fact = (dataMax - dataMin) / (2**nbits-1); offset = dataMin
- if (scale_fact > 0.) then
- allocate(arrayr4_3d_save(i1:i2,j1:j2,k1:k2))
- arrayr4_3d_save(i1:i2,j1:j2,k1:k2)=arrayr4_3d(i1:i2,j1:j2,k1:k2)
- arrayr4_3d = scale_fact*(nint((arrayr4_3d_save - offset) / scale_fact)) + offset
- ! compute max abs compression error.
- compress_err(i) = &
- maxval(abs(arrayr4_3d_save(i1:i2,j1:j2,k1:k2)-arrayr4_3d(i1:i2,j1:j2,k1:k2)))
- deallocate(arrayr4_3d_save)
- call mpi_allreduce(mpi_in_place,compress_err(i),1,mpi_real4,mpi_max,mpi_comm,ierr)
- !print *,'field name=',trim(fldName),dataMin,dataMax,compress_err(i)
- else
- ! field is constant
- compress_err(i) = 0.
- endif
- endif
- ncerr = nf90_put_var(ncid, varids(i), values=arrayr4_3d, start=(/totalLBound3d(1),totalLBound3d(2),totalLBound3d(3),1/)); NC_ERR_STOP(ncerr)
- else if (typekind == ESMF_TYPEKIND_R8) then
- call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=arrayr8_3d, totalLBound=totalLBound3d, totalUBound=totalUBound3d,rc=rc); ESMF_ERR_RETURN(rc)
- !print *,'field name=',trim(fldName),'bound=',totalLBound3d,'ubound=',totalUBound3d
- ncerr = nf90_put_var(ncid, varids(i), values=arrayr8_3d, start=(/totalLBound3d(1),totalLBound3d(2),totalLBound3d(3),1/)); NC_ERR_STOP(ncerr)
- end if
-
- end if !end fldlev(i)
-
- end do ! end fieldCount
-
- if (ideflate > 0 .and. nbits > 0) then
- ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr)
- do i=1, fieldCount
- if (compress_err(i) > 0) then
- ncerr = nf90_put_att(ncid, varids(i), 'max_abs_compression_error', compress_err(i)); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_att(ncid, varids(i), 'nbits', nbits); NC_ERR_STOP(ncerr)
- endif
- enddo
- ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr)
- endif
-
- deallocate(fcstField)
- deallocate(varids)
- deallocate(compress_err)
-
- ncerr = nf90_close(ncid=ncid); NC_ERR_STOP(ncerr)
- !call mpi_barrier(mpi_comm,ierr)
- !print *,'netcdf parallel close, finished write_netcdf_parallel'
-
- end subroutine write_netcdf_parallel
-#endif
-
-!----------------------------------------------------------------------------------------
- subroutine get_global_attr(fldbundle, ncid, rc)
- type(ESMF_FieldBundle), intent(in) :: fldbundle
- integer, intent(in) :: ncid
- integer, intent(out) :: rc
-
-! local variable
- integer :: i, attcount
- integer :: ncerr
- character(len=ESMF_MAXSTR) :: attName
- type(ESMF_TypeKind_Flag) :: typekind
-
- integer :: varival
- real(ESMF_KIND_R4) :: varr4val
- real(ESMF_KIND_R4), dimension(:), allocatable :: varr4list
- real(ESMF_KIND_R8) :: varr8val
- real(ESMF_KIND_R8), dimension(:), allocatable :: varr8list
- integer :: itemCount
- character(len=ESMF_MAXSTR) :: varcval
-!
- call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, Count=attcount, &
- rc=rc); ESMF_ERR_RETURN(rc)
-
- do i=1,attCount
-
- call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, attributeIndex=i, name=attName, &
- typekind=typekind, itemCount=itemCount, rc=rc); ESMF_ERR_RETURN(rc)
-
- if (typekind==ESMF_TYPEKIND_I4) then
- call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varival, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), varival); NC_ERR_STOP(ncerr)
-
- else if (typekind==ESMF_TYPEKIND_R4) then
- allocate (varr4list(itemCount))
- call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", &
- name=trim(attName), valueList=varr4list, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), varr4list); NC_ERR_STOP(ncerr)
- deallocate(varr4list)
-
- else if (typekind==ESMF_TYPEKIND_R8) then
- allocate (varr8list(itemCount))
- call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", &
- name=trim(attName), valueList=varr8list, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), varr8list); NC_ERR_STOP(ncerr)
- deallocate(varr8list)
-
- else if (typekind==ESMF_TYPEKIND_CHARACTER) then
- call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varcval, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), trim(varcval)); NC_ERR_STOP(ncerr)
-
- end if
-
- end do
-
- end subroutine get_global_attr
-!
-!----------------------------------------------------------------------------------------
- subroutine get_grid_attr(grid, prefix, ncid, varid, rc)
- type(ESMF_Grid), intent(in) :: grid
- character(len=*), intent(in) :: prefix
- integer, intent(in) :: ncid
- integer, intent(in) :: varid
- integer, intent(out) :: rc
-
-! local variable
- integer :: i, attcount, n, ind
- integer :: ncerr
- character(len=ESMF_MAXSTR) :: attName
- type(ESMF_TypeKind_Flag) :: typekind
-
- integer :: varival
- real(ESMF_KIND_R4) :: varr4val
- real(ESMF_KIND_R8) :: varr8val
- character(len=ESMF_MAXSTR) :: varcval
-!
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, Count=attcount, &
- rc=rc); ESMF_ERR_RETURN(rc)
-
- !write(0,*)'grid attcount = ', attcount
- do i=1,attCount
-
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, attributeIndex=i, name=attName, &
- typekind=typekind, itemCount=n, rc=rc); ESMF_ERR_RETURN(rc)
- !write(0,*)'grid att = ',i,trim(attName), ' itemCount = ' , n
-
- if (index(trim(attName), trim(prefix)//":")==1) then
- ind = len(trim(prefix)//":")
-
- if (typekind==ESMF_TYPEKIND_I4) then
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varival, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), varival); NC_ERR_STOP(ncerr)
-
- else if (typekind==ESMF_TYPEKIND_R4) then
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varr4val, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), varr4val); NC_ERR_STOP(ncerr)
-
- else if (typekind==ESMF_TYPEKIND_R8) then
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varr8val, rc=rc); ESMF_ERR_RETURN(rc)
- if (trim(attName) /= '_FillValue') then
- ! FIXME: _FillValue must be cast to var type when using
- ! NF90_NETCDF4. Until this is fixed, using netCDF default _FillValue.
- ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), varr8val); NC_ERR_STOP(ncerr)
- endif
-
- else if (typekind==ESMF_TYPEKIND_CHARACTER) then
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- name=trim(attName), value=varcval, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), trim(varcval)); NC_ERR_STOP(ncerr)
-
- end if
-
- end if
-
- end do
-
- end subroutine get_grid_attr
-
- subroutine add_dim(ncid, dim_name, dimid, grid, rc)
- integer, intent(in) :: ncid
- character(len=*), intent(in) :: dim_name
- integer, intent(inout) :: dimid
- type(ESMF_Grid), intent(in) :: grid
- integer, intent(out) :: rc
-
-! local variable
- integer :: i, attcount, n, dim_varid
- integer :: ncerr
- character(len=ESMF_MAXSTR) :: attName
- type(ESMF_TypeKind_Flag) :: typekind
-
- integer, allocatable :: valueListI(:)
- real(ESMF_KIND_R4), allocatable :: valueListR4(:)
- real(ESMF_KIND_R8), allocatable :: valueListR8(:)
- character(len=ESMF_MAXSTR), allocatable :: valueListC(:)
-!
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- attnestflag=ESMF_ATTNEST_OFF, name=dim_name, &
- typekind=typekind, itemCount=n, rc=rc); ESMF_ERR_RETURN(rc)
-
- if ( trim(dim_name) == "time" ) then
- ! using an unlimited dim requires collective mode (NF90_COLLECTIVE)
- ! for parallel writes, which seems to slow things down on hera.
- !ncerr = nf90_def_dim(ncid, trim(dim_name), NF90_UNLIMITED, dimid); NC_ERR_STOP(ncerr)
- ncerr = nf90_def_dim(ncid, trim(dim_name), 1, dimid); NC_ERR_STOP(ncerr)
- else
- ncerr = nf90_def_dim(ncid, trim(dim_name), n, dimid); NC_ERR_STOP(ncerr)
- end if
-
- if (typekind==ESMF_TYPEKIND_R8) then
- ncerr = nf90_def_var(ncid, dim_name, NF90_REAL8, dimids=(/dimid/), varid=dim_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, dim_varid, NF90_INDEPENDENT)
- allocate(valueListR8(n))
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- name=trim(dim_name), valueList=valueListR8, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_var(ncid, dim_varid, values=valueListR8 ); NC_ERR_STOP(ncerr)
- ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr)
- deallocate(valueListR8)
- else if (typekind==ESMF_TYPEKIND_R4) then
- ncerr = nf90_def_var(ncid, dim_name, NF90_REAL4, dimids=(/dimid/), varid=dim_varid); NC_ERR_STOP(ncerr)
- ncerr = nf90_var_par_access(ncid, dim_varid, NF90_INDEPENDENT)
- allocate(valueListR4(n))
- call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", &
- name=trim(dim_name), valueList=valueListR4, rc=rc); ESMF_ERR_RETURN(rc)
- ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr)
- ncerr = nf90_put_var(ncid, dim_varid, values=valueListR4 ); NC_ERR_STOP(ncerr)
- ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr)
- deallocate(valueListR4)
- else
- write(0,*)'Error in module_write_netcdf.F90(add_dim) unknown typekind for ',trim(dim_name)
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
- end if
-
- call get_grid_attr(grid, dim_name, ncid, dim_varid, rc)
-
- end subroutine add_dim
-!
-!----------------------------------------------------------------------------------------
- subroutine nccheck(status)
- use netcdf
- implicit none
- integer, intent (in) :: status
-
- if (status /= nf90_noerr) then
- write(0,*) status, trim(nf90_strerror(status))
- stop "stopped"
- end if
- end subroutine nccheck
-
-end module module_write_netcdf_parallel
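
The deleted module_write_netcdf_parallel is removed in favor of the 'par' code path of module_write_netcdf above. The calls that distinguish that path are nf90_create with an MPI communicator and nf90_var_par_access on each variable; the stand-alone sketch below shows only that pattern, assuming a parallel-enabled netCDF/HDF5 build, with illustrative file, dimension, and variable names, and with error checking omitted.

    ! Illustrative sketch only -- not part of this patch.
    program parallel_write_sketch
      use mpi
      use netcdf
      implicit none
      integer :: ierr, ncerr, mype, npes, ncid, dimid, varid
      call mpi_init(ierr)
      call mpi_comm_rank(mpi_comm_world, mype, ierr)
      call mpi_comm_size(mpi_comm_world, npes, ierr)
      ! every task opens the same file; MPI-IO/HDF5 coordinate the access
      ncerr = nf90_create('sketch.nc', cmode=ior(nf90_clobber, nf90_netcdf4), &
                          comm=mpi_comm_world, info=mpi_info_null, ncid=ncid)
      ! fixed-size dimension (see the add_dim comment above about NF90_UNLIMITED)
      ncerr = nf90_def_dim(ncid, 'task', npes, dimid)
      ncerr = nf90_def_var(ncid, 'value', nf90_float, [dimid], varid)
      ! uncompressed variables can use independent access; deflated ones need NF90_COLLECTIVE
      ncerr = nf90_var_par_access(ncid, varid, nf90_independent)
      ncerr = nf90_enddef(ncid)
      ! each task writes its own element of the shared variable
      ncerr = nf90_put_var(ncid, varid, [real(mype, 4)], start=[mype+1], count=[1])
      ncerr = nf90_close(ncid)
      call mpi_finalize(ierr)
    end program parallel_write_sketch
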
diff --git a/io/module_wrt_grid_comp.F90 b/io/module_wrt_grid_comp.F90
index 70257b8d6..0bbf2a221 100644
--- a/io/module_wrt_grid_comp.F90
+++ b/io/module_wrt_grid_comp.F90
@@ -31,7 +31,7 @@ module module_wrt_grid_comp
use write_internal_state
use module_fv3_io_def, only : num_pes_fcst, &
- n_group, num_files, app_domain, &
+ n_group, num_files, &
filename_base, output_grid, output_file, &
imo,jmo,ichunk2d,jchunk2d, &
ichunk3d,jchunk3d,kchunk3d,nbits, &
@@ -43,7 +43,6 @@ module module_wrt_grid_comp
use module_write_netcdf, only : write_netcdf
use physcons, only : pi => con_pi
use inline_post, only : inline_post_run, inline_post_getattr
- use module_write_netcdf_parallel, only : write_netcdf_parallel
!
!-----------------------------------------------------------------------
!
@@ -55,31 +54,22 @@ module module_wrt_grid_comp
!
!-----------------------------------------------------------------------
!
- real, parameter :: rdgas=287.04, grav=9.80
- real, parameter :: stndrd_atmos_ps = 101325.
- real, parameter :: stndrd_atmos_lapse = 0.0065
!
integer,save :: lead_write_task !<-- Rank of the first write task in the write group
integer,save :: last_write_task !<-- Rank of the last write task in the write group
integer,save :: ntasks !<-- # of write tasks in the current group
+ integer,save :: itasks, jtasks !<-- # of write tasks in i/j direction in the current group
- integer,save :: mytile !<-- the tile number in write task
integer,save :: wrt_mpi_comm !<-- the mpi communicator in the write comp
integer,save :: idate(7)
logical,save :: write_nsflip
- logical,save :: first_init=.false.
- logical,save :: first_run=.false.
- logical,save :: first_getlatlon=.true.
- logical,save :: first_getmaskwrt=.true. !<-- for mask the output grid of the write comp
logical,save :: change_wrtidate=.false.
!
!-----------------------------------------------------------------------
!
- type(wrt_internal_state),pointer :: wrt_int_state ! The internal state pointer.
type(ESMF_FieldBundle) :: gridFB
integer :: FBcount
character(len=esmf_maxstr),allocatable :: fcstItemNameList(:)
- real(ESMF_KIND_R4), dimension(:,:), allocatable :: maskwrt
!
!-----------------------------------------------------------------------
REAL(KIND=8) :: btim,btim0
@@ -113,15 +103,15 @@ subroutine SetServices(wrt_comp, rc)
call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_INITIALIZE, &
userRoutine=wrt_initialize, rc=rc)
- if(rc/=0) write(*,*)'Error: write grid comp, initial'
-!
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_RUN, &
userRoutine=wrt_run, rc=rc)
- if(rc/=0) write(*,*)'Error: write grid comp, run'
-!
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
call ESMF_GridCompSetEntryPoint(wrt_comp, ESMF_METHOD_FINALIZE, &
userRoutine=wrt_finalize, rc=rc)
- if(rc/=0) write(*,*)'Error: write grid comp, run'
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
end subroutine SetServices
!
@@ -146,21 +136,20 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
type(write_wrap) :: WRAP
type(wrt_internal_state),pointer :: wrt_int_state
- integer :: ISTAT, tl, i, j, n, k
+ integer :: tl, i, j, n, k
integer,dimension(2,6) :: decomptile
integer,dimension(2) :: regDecomp !define delayout for the nest grid
integer :: fieldCount
integer :: vm_mpi_comm
- character(40) :: fieldName, axesname,longname
- type(ESMF_Config) :: cf
+ character(40) :: fieldName
+ type(ESMF_Config) :: cf, cf_output_grid
type(ESMF_DELayout) :: delayout
- type(ESMF_Grid) :: wrtGrid, fcstGrid
+ type(ESMF_Grid) :: fcstGrid
+ type(ESMF_Grid), allocatable :: wrtGrid(:)
type(ESMF_Array) :: array
- type(ESMF_FieldBundle) :: fieldbdl_work
type(ESMF_Field) :: field_work, field
type(ESMF_Decomp_Flag) :: decompflagPTile(2,6)
- character(len=80) :: attrValueSList(2)
type(ESMF_StateItem_Flag), allocatable :: fcstItemTypeList(:)
type(ESMF_FieldBundle) :: fcstFB, fieldbundle
type(ESMF_Field), allocatable :: fcstField(:)
@@ -179,7 +168,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
real(ESMF_KIND_R4) :: valueR4
real(ESMF_KIND_R8) :: valueR8
- integer :: attCount, axeslen, jidx, idx, noutfile
+ integer :: attCount, jidx, idx, noutfile
character(19) :: newdate
character(128) :: FBlist_outfilename(100), outfile_name
character(128),dimension(:,:), allocatable :: outfilename
@@ -189,19 +178,17 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
real(ESMF_KIND_R8) :: rot_lon, rot_lat
real(ESMF_KIND_R8) :: geo_lon, geo_lat
real(ESMF_KIND_R8) :: lon1_r8, lat1_r8
- real(ESMF_KIND_R8) :: x1, y1, x, y, delat
+ real(ESMF_KIND_R8) :: x1, y1, x, y, delat, delon
type(ESMF_TimeInterval) :: IAU_offsetTI
- type(ESMF_DataCopy_Flag) :: copyflag=ESMF_DATACOPY_REFERENCE
-! real(8),parameter :: PI=3.14159265358979d0
+ character(256) :: cf_open, cf_close
character(256) :: gridfile
integer :: num_output_file
-
!
- logical,save :: first=.true.
logical :: lprnt
-!test
- real(ESMF_KIND_R8),dimension(:,:), pointer :: glatPtr, glonPtr
+
+ integer :: ngrids, grid_id
+ logical :: top_parent_is_global
!
!-----------------------------------------------------------------------
!***********************************************************************
@@ -243,6 +230,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
! lead_write_task,'last_write_task=',last_write_task, &
! 'mype=',wrt_int_state%mype,'jidx=',jidx,' comm=',wrt_mpi_comm
!
+
!-----------------------------------------------------------------------
!*** get configuration variables
!-----------------------------------------------------------------------
@@ -265,115 +253,206 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__)) return
+ if( wrt_int_state%write_dopost ) then
+#ifdef NO_INLINE_POST
+ rc = ESMF_RC_NOT_IMPL
+ print *,'inline post not available on this machine'
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
+ line=__LINE__, file=__FILE__)) return
+#endif
+ call esmf_configgetattribute(cf,wrt_int_state%post_nlunit,default=777,label='nlunit:',rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
+ line=__LINE__, file=__FILE__)) return
+ call ESMF_ConfigGetAttribute(config=CF,value=wrt_int_state%post_namelist,default='itag', &
+ label ='post_namelist:',rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
+ line=__LINE__, file=__FILE__)) return
+ endif
- ! chunksizes for netcdf_parallel
- call ESMF_ConfigGetAttribute(config=CF,value=ichunk2d,default=0,label ='ichunk2d:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF,value=jchunk2d,default=0,label ='jchunk2d:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF,value=ichunk3d,default=0,label ='ichunk3d:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF,value=jchunk3d,default=0,label ='jchunk3d:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF,value=kchunk3d,default=0,label ='kchunk3d:',rc=rc)
-
- ! zlib compression flag
- call ESMF_ConfigGetAttribute(config=CF,value=ideflate,default=0,label ='ideflate:',rc=rc)
- if (ideflate < 0) ideflate=0
-
- call ESMF_ConfigGetAttribute(config=CF,value=nbits,default=0,label ='nbits:',rc=rc)
- ! nbits quantization level for lossy compression (must be between 1 and 31)
- ! 1 is most compression, 31 is least. If outside this range, set to zero
- ! which means use lossless compression.
- if (nbits < 1 .or. nbits > 31) nbits=0 ! lossless compression (no quantization)
-! variables for I/O options
- call ESMF_ConfigGetAttribute(config=CF,value=app_domain, default="global", &
- label ='app_domain:',rc=rc)
+ allocate(output_file(num_files))
+ num_output_file = ESMF_ConfigGetLen(config=CF, label ='output_file:',rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (num_files == num_output_file) then
+ call ESMF_ConfigGetAttribute(CF,valueList=output_file,label='output_file:', &
+ count=num_files, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ do i = 1, num_files
+ if(output_file(i) /= "netcdf" .and. output_file(i) /= "netcdf_parallel") then
+ write(0,*)"Only netcdf and netcdf_parallel are allowed for multiple values of output_file"
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ endif
+ enddo
+ else if ( num_output_file == 1) then
+ call ESMF_ConfigGetAttribute(CF,valuelist=output_file,label='output_file:', count=1, rc=rc)
+ output_file(1:num_files) = output_file(1)
+ else
+ output_file(1:num_files) = 'netcdf'
+ endif
+ if(lprnt) then
+ print *,'num_files=',num_files
+ do i=1,num_files
+ print *,'num_file=',i,'filename_base= ',trim(filename_base(i)),' output_file= ',trim(output_file(i))
+ enddo
+ endif
+
+ call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", &
+ name="ngrids", value=ngrids, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", &
+ name="top_parent_is_global", value=top_parent_is_global, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ allocate(wrtGrid(ngrids))
+
+ allocate(output_grid(ngrids))
+
+ allocate(imo(ngrids))
+ allocate(jmo(ngrids))
+
+ allocate(cen_lon(ngrids))
+ allocate(cen_lat(ngrids))
+ allocate(lon1(ngrids))
+ allocate(lat1(ngrids))
+ allocate(lon2(ngrids))
+ allocate(lat2(ngrids))
+ allocate(dlon(ngrids))
+ allocate(dlat(ngrids))
+
+ allocate(stdlat1(ngrids))
+ allocate(stdlat2(ngrids))
+ allocate(dx(ngrids))
+ allocate(dy(ngrids))
+
+ allocate(ichunk2d(ngrids))
+ allocate(jchunk2d(ngrids))
+ allocate(ichunk3d(ngrids))
+ allocate(jchunk3d(ngrids))
+ allocate(kchunk3d(ngrids))
+ allocate(ideflate(ngrids))
+ allocate(nbits(ngrids))
+
+ do n=1, ngrids
- call ESMF_ConfigGetAttribute(config=CF, value=output_grid, label ='output_grid:',rc=rc)
+ if (n == 1) then
+ ! for top level domain look directly in cf
+ cf_output_grid = cf
+ else
+        ! for nest domains, look under the specific <output_grid_NN> section
+        write(cf_open, '("<output_grid_",I2.2,">")') n
+        write(cf_close,'("</output_grid_",I2.2,">")') n
+ cf_output_grid = ESMF_ConfigCreate(cf, openLabel=trim(cf_open), closeLabel=trim(cf_close), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ end if
+
+ if (allocated(wrt_int_state%lat_start_wrtgrp)) deallocate (wrt_int_state%lat_start_wrtgrp)
+ if (allocated(wrt_int_state%lat_end_wrtgrp )) deallocate (wrt_int_state%lat_end_wrtgrp )
+ if (allocated(wrt_int_state%lon_start_wrtgrp)) deallocate (wrt_int_state%lon_start_wrtgrp)
+ if (allocated(wrt_int_state%lon_end_wrtgrp )) deallocate (wrt_int_state%lon_end_wrtgrp )
+ if (allocated(wrt_int_state%latPtr) ) deallocate (wrt_int_state%latPtr)
+ if (allocated(wrt_int_state%lonPtr) ) deallocate (wrt_int_state%lonPtr)
+
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=output_grid(n), label ='output_grid:',rc=rc)
if (lprnt) then
- print *,'output_grid=',trim(output_grid)
+ print *,'grid_id= ', n, ' output_grid= ', trim(output_grid(n))
end if
- if(trim(output_grid) == 'gaussian_grid' .or. trim(output_grid) == 'global_latlon') then
- call ESMF_ConfigGetAttribute(config=CF, value=imo, label ='imo:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=jmo, label ='jmo:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=CF, value=itasks,default=1,label ='itasks:',rc=rc)
+ jtasks = ntasks
+ if(itasks > 0 ) jtasks = ntasks/itasks
+ if( itasks*jtasks /= ntasks ) then
+ itasks = 1
+ jtasks = ntasks
+ endif
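+        ! itasks/jtasks give the i x j decomposition of the ntasks write tasks over the
+        ! output grid, e.g. ntasks=8 with itasks=4 gives a 4x2 layout; when itasks does
+        ! not divide ntasks evenly the old 1 x ntasks layout is used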
+
+ if(trim(output_grid(n)) == 'gaussian_grid' .or. trim(output_grid(n)) == 'global_latlon') then
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=imo(n), label ='imo:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=jmo(n), label ='jmo:',rc=rc)
if (lprnt) then
- print *,'imo=',imo,'jmo=',jmo
+ print *,'imo=',imo(n),'jmo=',jmo(n)
end if
- else if(trim(output_grid) == 'regional_latlon') then
- call ESMF_ConfigGetAttribute(config=CF, value=lon1, label ='lon1:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lat1, label ='lat1:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lon2, label ='lon2:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lat2, label ='lat2:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=dlon, label ='dlon:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=dlat, label ='dlat:',rc=rc)
- imo = (lon2-lon1)/dlon + 1
- jmo = (lat2-lat1)/dlat + 1
+ else if(trim(output_grid(n)) == 'regional_latlon') then
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat1(n), label ='lat1:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon2(n), label ='lon2:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat2(n), label ='lat2:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlon(n), label ='dlon:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlat(n), label ='dlat:',rc=rc)
+ imo(n) = (lon2(n)-lon1(n))/dlon(n) + 1
+ jmo(n) = (lat2(n)-lat1(n))/dlat(n) + 1
if (lprnt) then
- print *,'lon1=',lon1,' lat1=',lat1
- print *,'lon2=',lon2,' lat2=',lat2
- print *,'dlon=',dlon,' dlat=',dlat
- print *,'imo =',imo, ' jmo=',jmo
+ print *,'lon1=',lon1(n),' lat1=',lat1(n)
+ print *,'lon2=',lon2(n),' lat2=',lat2(n)
+ print *,'dlon=',dlon(n),' dlat=',dlat(n)
+ print *,'imo =',imo(n), ' jmo =',jmo(n)
end if
- else if (trim(output_grid) == 'rotated_latlon') then
- call ESMF_ConfigGetAttribute(config=CF, value=cen_lon, label ='cen_lon:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=cen_lat, label ='cen_lat:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lon1, label ='lon1:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lat1, label ='lat1:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lon2, label ='lon2:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lat2, label ='lat2:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=dlon, label ='dlon:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=dlat, label ='dlat:', rc=rc)
- imo = (lon2-lon1)/dlon + 1
- jmo = (lat2-lat1)/dlat + 1
+ else if (trim(output_grid(n)) == 'rotated_latlon') then
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lon(n), label ='cen_lon:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lat(n), label ='cen_lat:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat1(n), label ='lat1:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon2(n), label ='lon2:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat2(n), label ='lat2:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlon(n), label ='dlon:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dlat(n), label ='dlat:', rc=rc)
+ imo(n) = (lon2(n)-lon1(n))/dlon(n) + 1
+ jmo(n) = (lat2(n)-lat1(n))/dlat(n) + 1
if (lprnt) then
- print *,'lon1=',lon1,' lat1=',lat1
- print *,'lon2=',lon2,' lat2=',lat2
- print *,'dlon=',dlon,' dlat=',dlat
- print *,'imo =',imo, ' jmo=',jmo
+ print *,'lon1=',lon1(n),' lat1=',lat1(n)
+ print *,'lon2=',lon2(n),' lat2=',lat2(n)
+ print *,'dlon=',dlon(n),' dlat=',dlat(n)
+ print *,'imo =',imo(n), ' jmo =',jmo(n)
end if
- else if (trim(output_grid) == 'lambert_conformal') then
- call ESMF_ConfigGetAttribute(config=CF, value=cen_lon, label ='cen_lon:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=cen_lat, label ='cen_lat:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=stdlat1, label ='stdlat1:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=stdlat2, label ='stdlat2:',rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=imo, label ='nx:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=jmo, label ='ny:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lon1, label ='lon1:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=lat1, label ='lat1:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=dx, label ='dx:', rc=rc)
- call ESMF_ConfigGetAttribute(config=CF, value=dy, label ='dy:', rc=rc)
+ else if (trim(output_grid(n)) == 'lambert_conformal') then
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lon(n), label ='cen_lon:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=cen_lat(n), label ='cen_lat:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=stdlat1(n), label ='stdlat1:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=stdlat2(n), label ='stdlat2:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=imo(n), label ='nx:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=jmo(n), label ='ny:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lon1(n), label ='lon1:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=lat1(n), label ='lat1:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dx(n), label ='dx:', rc=rc)
+ call ESMF_ConfigGetAttribute(config=cf_output_grid, value=dy(n), label ='dy:', rc=rc)
if (lprnt) then
- print *,'cen_lon=',cen_lon,' cen_lat=',cen_lat
- print *,'stdlat1=',stdlat1,' stdlat2=',stdlat2
- print *,'lon1=',lon1,' lat1=',lat1
- print *,'nx=',imo, ' ny=',jmo
- print *,'dx=',dx,' dy=',dy
+ print *,'cen_lon=',cen_lon(n),' cen_lat=',cen_lat(n)
+ print *,'stdlat1=',stdlat1(n),' stdlat2=',stdlat2(n)
+ print *,'lon1=',lon1(n),' lat1=',lat1(n)
+ print *,'nx=',imo(n), ' ny=',jmo(n)
+ print *,'dx=',dx(n),' dy=',dy(n)
endif
endif ! output_grid
- if( wrt_int_state%write_dopost ) then
-#ifdef NO_INLINE_POST
- rc = ESMF_RC_NOT_IMPL
- print *,'inline post not available on this machine'
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) return
-#endif
- call esmf_configgetattribute(cf,wrt_int_state%post_nlunit,default=777,label='nlunit:',rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) return
- call ESMF_ConfigGetAttribute(config=CF,value=wrt_int_state%post_namelist,default='itag', &
- label ='post_namelist:',rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) return
+
+ ! chunksizes for netcdf_parallel
+ call ESMF_ConfigGetAttribute(config=CF,value=ichunk2d(n),default=0,label ='ichunk2d:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=CF,value=jchunk2d(n),default=0,label ='jchunk2d:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=CF,value=ichunk3d(n),default=0,label ='ichunk3d:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=CF,value=jchunk3d(n),default=0,label ='jchunk3d:',rc=rc)
+ call ESMF_ConfigGetAttribute(config=CF,value=kchunk3d(n),default=0,label ='kchunk3d:',rc=rc)
+
+ ! zlib compression flag
+ call ESMF_ConfigGetAttribute(config=CF,value=ideflate(n),default=0,label ='ideflate:',rc=rc)
+ if (ideflate(n) < 0) ideflate(n)=0
+
+ call ESMF_ConfigGetAttribute(config=CF,value=nbits(n),default=0,label ='nbits:',rc=rc)
+ if (lprnt) then
+ print *,'ideflate=',ideflate(n),' nbits=',nbits(n)
+ end if
+ ! nbits quantization level for lossy compression (must be between 1 and 31)
+ ! 1 is most compression, 31 is least. If outside this range, set to zero
+ ! which means use lossless compression.
+ if (nbits(n) < 1 .or. nbits(n) > 31) nbits(n)=0 ! lossless compression (no quantization)
+
+ if (cf_output_grid /= cf) then
+ ! destroy the temporary config object created for nest domains
+ call ESMF_ConfigDestroy(config=cf_output_grid, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
endif
-!
-!-----------------------------------------------------------------------
-!*** Create the cubed sphere grid with field on PETs
-!*** first try: Create cubed sphere grid from file
-!-----------------------------------------------------------------------
-!
- if ( trim(output_grid) == 'cubed_sphere_grid' ) then
- mytile = mod(wrt_int_state%mype,ntasks)+1
- if ( trim(app_domain) == 'global' ) then
+ if ( trim(output_grid(n)) == 'cubed_sphere_grid' ) then
+ !*** Create cubed sphere grid from file
+ if (top_parent_is_global .and. n==1) then
+ gridfile = 'grid_spec.nc' ! global top-level parent
do tl=1,6
decomptile(1,tl) = 1
decomptile(2,tl) = jidx
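The per-domain loop above reads one output-grid description for each of the ngrids domains: grid 1 uses the top-level labels of model_configure directly, while each nest is read from a bracketed section that ESMF_ConfigCreate scopes between openLabel and closeLabel. A minimal stand-alone sketch of that scoping pattern, assuming the <output_grid_NN> section-tag convention used for the openLabel/closeLabel strings above; the file name and the values in the commented excerpt are illustrative, not taken from the patch:

    ! hypothetical model_configure excerpt: parent grid plus one nest
    !
    !   output_grid:   gaussian_grid
    !   imo:           384
    !   jmo:           192
    !   <output_grid_02>
    !     output_grid: lambert_conformal
    !     cen_lon:     -97.5
    !   </output_grid_02>
    !
    program scoped_config_sketch
      use ESMF
      implicit none
      type(ESMF_Config) :: cf, cf_nest
      character(32)     :: grid_name
      integer           :: rc

      call ESMF_Initialize(rc=rc)
      cf = ESMF_ConfigCreate(rc=rc)
      call ESMF_ConfigLoadFile(cf, 'model_configure', rc=rc)

      ! child config that only sees the labels between the open/close tags,
      ! so the same 'output_grid:' label can be reused for every domain
      cf_nest = ESMF_ConfigCreate(cf, openLabel='<output_grid_02>', &
                                  closeLabel='</output_grid_02>', rc=rc)
      call ESMF_ConfigGetAttribute(cf_nest, grid_name, label='output_grid:', rc=rc)
      print *, 'nest output_grid = ', trim(grid_name)

      call ESMF_ConfigDestroy(cf_nest, rc=rc)
      call ESMF_Finalize(rc=rc)
    end program scoped_config_sketch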
@@ -383,19 +462,23 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
name="gridfile", value=gridfile, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- CALL ESMF_LogWrite("wrtComp: gridfile:"//trim(gridfile),ESMF_LOGMSG_INFO,rc=rc)
- wrtgrid = ESMF_GridCreateMosaic(filename="INPUT/"//trim(gridfile), &
+ call ESMF_LogWrite("wrtComp: gridfile:"//trim(gridfile),ESMF_LOGMSG_INFO,rc=rc)
+ wrtGrid(n) = ESMF_GridCreateMosaic(filename="INPUT/"//trim(gridfile), &
regDecompPTile=decomptile,tileFilePath="INPUT/", &
decompflagPTile=decompflagPTile, &
staggerlocList=(/ESMF_STAGGERLOC_CENTER, ESMF_STAGGERLOC_CORNER/), &
name='wrt_grid', rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
else
- if(trim(app_domain) == 'nested') then
- gridfile='grid.nest02.tile7.nc'
- else if(trim(app_domain) == 'regional') then
- gridfile='grid.tile7.halo0.nc'
- endif
+ if (top_parent_is_global) then
+ write(gridfile,'(A,I2.2,A,I1,A)') 'grid.nest', n, '.tile', n+5, '.nc'
+ else
+ if (n == 1) then
+ gridfile='grid.tile7.halo0.nc' ! regional top-level parent
+ else
+ write(gridfile,'(A,I2.2,A,I1,A)') 'grid.nest', n, '.tile', n, '.nc'
+ endif
+ end if
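+          ! e.g. a global parent's first nest (n=2) reads grid.nest02.tile7.nc;
+          ! a regional parent reads grid.tile7.halo0.nc and its first nest reads grid.nest02.tile2.nc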
regDecomp(1) = 1
regDecomp(2) = ntasks
allocate(petMap(ntasks))
@@ -406,62 +489,63 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
! create the nest Grid by reading it from file but use DELayout
- wrtGrid = ESMF_GridCreate(filename="INPUT/"//trim(gridfile), &
+ call ESMF_LogWrite("wrtComp: gridfile:"//trim(gridfile),ESMF_LOGMSG_INFO,rc=rc)
+ wrtGrid(n) = ESMF_GridCreate(filename="INPUT/"//trim(gridfile), &
fileformat=ESMF_FILEFORMAT_GRIDSPEC, regDecomp=regDecomp, &
decompflag=(/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/), &
delayout=delayout, isSphere=.false., indexflag=ESMF_INDEX_DELOCAL, &
rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- print *,'in nested/regional cubed_sphere grid, regDecomp=',regDecomp,' PetMap=',petMap(1),petMap(ntasks), &
+ if (lprnt) print *,'in nested/regional cubed_sphere grid, regDecomp=',regDecomp,' PetMap=',petMap(1),petMap(ntasks), &
'gridfile=',trim(gridfile)
deallocate(petMap)
endif
- else if ( trim(output_grid) == 'gaussian_grid') then
+ else if ( trim(output_grid(n)) == 'gaussian_grid') then
- wrtgrid = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), &
- maxIndex=(/imo,jmo/), regDecomp=(/1,ntasks/), &
- indexflag=ESMF_INDEX_GLOBAL, &
- name='wrt_grid',rc=rc)
-! indexflag=ESMF_INDEX_GLOBAL, coordSys=ESMF_COORDSYS_SPH_DEG
+ wrtGrid(n) = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), &
+ maxIndex=(/imo(n),jmo(n)/), regDecomp=(/itasks,jtasks/), &
+ indexflag=ESMF_INDEX_GLOBAL, &
+ name='wrt_grid',rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridAddCoord(wrtgrid, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc)
+ call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridGetCoord(wrtgrid, coordDim=1, farrayPtr=lonPtr, rc=rc)
+ call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridGetCoord(wrtgrid, coordDim=2, farrayPtr=latPtr, rc=rc)
+ call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
- allocate(slat(jmo), lat(jmo), lon(imo))
- call splat(4, jmo, slat)
+ allocate(slat(jmo(n)), lat(jmo(n)), lon(imo(n)))
+ call splat(4, jmo(n), slat)
if(write_nsflip) then
- do j=1,jmo
+ do j=1,jmo(n)
lat(j) = asin(slat(j)) * radi
enddo
else
- do j=1,jmo
- lat(jmo-j+1) = asin(slat(j)) * radi
+ do j=1,jmo(n)
+ lat(jmo(n)-j+1) = asin(slat(j)) * radi
enddo
endif
wrt_int_state%latstart = lat(1)
- wrt_int_state%latlast = lat(jmo)
- do j=1,imo
- lon(j) = 360.d0/real(imo,8) *real(j-1,8)
+ wrt_int_state%latlast = lat(jmo(n))
+ do j=1,imo(n)
+ lon(j) = 360.d0/real(imo(n),8) *real(j-1,8)
enddo
wrt_int_state%lonstart = lon(1)
- wrt_int_state%lonlast = lon(imo)
+ wrt_int_state%lonlast = lon(imo(n))
do j=lbound(latPtr,2),ubound(latPtr,2)
do i=lbound(lonPtr,1),ubound(lonPtr,1)
- lonPtr(i,j) = 360.d0/real(imo,8) * real(i-1,8)
+ lonPtr(i,j) = 360.d0/real(imo(n),8) * real(i-1,8)
latPtr(i,j) = lat(j)
enddo
enddo
-! print *,'aft wrtgrd, Gaussian, dimi,i=',lbound(lonPtr,1),ubound(lonPtr,1), &
-! ' j=',lbound(lonPtr,2),ubound(lonPtr,2),'imo=',imo,'jmo=',jmo
+ if(lprnt) print *,'aft wrtgrd, Gaussian, dimi,i=',lbound(lonPtr,1),ubound(lonPtr,1), &
+ lbound(lonPtr,2),ubound(lonPtr,2),'j(i)=',lbound(latPtr,1),ubound(latPtr,1),&
+ ' j(j)=',lbound(latPtr,2),ubound(latPtr,2),'imo=',imo,'jmo=',jmo
! if(wrt_int_state%mype==0) print *,'aft wrtgrd, lon=',lonPtr(1:5,1), &
! 'lat=',latPtr(1,1:5),'imo,jmo=',imo,jmo
! lonPtr(lbound(lonPtr,1),ubound(lonPtr,2)),'lat=',latPtr(lbound(lonPtr,1),lbound(lonPtr,2)), &
@@ -472,12 +556,20 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
wrt_int_state%lon_end = ubound(lonPtr,1)
allocate( wrt_int_state%lat_start_wrtgrp(wrt_int_state%petcount))
allocate( wrt_int_state%lat_end_wrtgrp (wrt_int_state%petcount))
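+        ! with the 2-D (itasks x jtasks) write decomposition the i-ranges have to be
+        ! gathered across the write group as well, not just the j-ranges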
+ allocate( wrt_int_state%lon_start_wrtgrp(wrt_int_state%petcount))
+ allocate( wrt_int_state%lon_end_wrtgrp (wrt_int_state%petcount))
call mpi_allgather(wrt_int_state%lat_start,1,MPI_INTEGER, &
wrt_int_state%lat_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
call mpi_allgather(wrt_int_state%lat_end, 1,MPI_INTEGER, &
wrt_int_state%lat_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
+ call mpi_allgather(wrt_int_state%lon_start,1,MPI_INTEGER, &
+ wrt_int_state%lon_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
+ call mpi_allgather(wrt_int_state%lon_end, 1,MPI_INTEGER, &
+ wrt_int_state%lon_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
if( lprnt ) print *,'aft wrtgrd, Gaussian, dimj_start=',wrt_int_state%lat_start_wrtgrp, &
- 'dimj_end=',wrt_int_state%lat_end_wrtgrp, 'wrt_group=',n_group
+ 'dimj_end=',wrt_int_state%lat_end_wrtgrp, 'wrt_group=',n_group, &
+ 'lon_start,end=',wrt_int_state%lon_start,wrt_int_state%lon_end, &
+ 'lat_start,end=',wrt_int_state%lat_start, wrt_int_state%lat_end
allocate( wrt_int_state%latPtr(wrt_int_state%lon_start:wrt_int_state%lon_end, &
wrt_int_state%lat_start:wrt_int_state%lat_end))
allocate( wrt_int_state%lonPtr(wrt_int_state%lon_start:wrt_int_state%lon_end, &
@@ -488,60 +580,63 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
wrt_int_state%lonPtr(i,j) = lonPtr(i,j)
enddo
enddo
- wrt_int_state%im = imo
- wrt_int_state%jm = jmo
+ wrt_int_state%im = imo(n)
+ wrt_int_state%jm = jmo(n)
wrt_int_state%post_maptype = 4
- deallocate(slat)
- else if ( trim(output_grid) == 'global_latlon') then
- wrtgrid = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), &
- maxIndex=(/imo,jmo/), regDecomp=(/1,ntasks/), &
- indexflag=ESMF_INDEX_GLOBAL, name='wrt_grid',rc=rc)
+ deallocate(slat, lat, lon)
+
+ else if ( trim(output_grid(n)) == 'global_latlon') then
+ wrtGrid(n) = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), &
+ maxIndex=(/imo(n),jmo(n)/), regDecomp=(/itasks,jtasks/), &
+ indexflag=ESMF_INDEX_GLOBAL, &
+ name='wrt_grid',rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridAddCoord(wrtgrid, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc)
+ call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridGetCoord(wrtgrid, coordDim=1, farrayPtr=lonPtr, rc=rc)
+ call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridGetCoord(wrtgrid, coordDim=2, farrayPtr=latPtr, rc=rc)
+ call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
- allocate(lat(jmo), lon(imo))
- if (mod(jmo,2) == 0) then
+ allocate(lat(jmo(n)), lon(imo(n)))
+ if (mod(jmo(n),2) == 0) then
! if jmo even, lats do not include poles and equator
- delat = 180.d0/real(jmo,8)
+ delat = 180.d0/real(jmo(n),8)
if(write_nsflip) then
- do j=1,jmo
+ do j=1,jmo(n)
lat(j) = 90.d0 - 0.5*delat - real(j-1,8)*delat
enddo
else
- do j=1,jmo
+ do j=1,jmo(n)
lat(j) = -90.d0 + 0.5*delat + real(j-1,8)*delat
enddo
endif
else
! if jmo odd, lats include poles and equator
- delat = 180.d0/real(jmo-1,8)
+ delat = 180.d0/real(jmo(n)-1,8)
if(write_nsflip) then
- do j=1,jmo
+ do j=1,jmo(n)
lat(j) = 90.d0 - real(j-1,8)*delat
enddo
else
- do j=1,jmo
+ do j=1,jmo(n)
lat(j) = -90.d0 + real(j-1,8)*delat
enddo
endif
endif
wrt_int_state%latstart = lat(1)
- wrt_int_state%latlast = lat(jmo)
- do i=1,imo
- lon(i) = 360.d0/real(imo,8) *real(i-1,8)
+ wrt_int_state%latlast = lat(jmo(n))
+ delon = 360.d0/real(imo(n),8)
+ do i=1,imo(n)
+ lon(i) = real(i-1,8)*delon
enddo
wrt_int_state%lonstart = lon(1)
- wrt_int_state%lonlast = lon(imo)
+ wrt_int_state%lonlast = lon(imo(n))
do j=lbound(latPtr,2),ubound(latPtr,2)
do i=lbound(lonPtr,1),ubound(lonPtr,1)
lonPtr(i,j) = lon(i)
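Worked example for the even/odd jmo branches above (with write_nsflip = .false.): for jmo(n)=4 the spacing is delat = 180/4 = 45 and the latitudes are -67.5, -22.5, 22.5, 67.5, so poles and equator are excluded; for jmo(n)=5 the spacing is delat = 180/(5-1) = 45 and the latitudes are -90, -45, 0, 45, 90, so poles and equator are included. With write_nsflip = .true. the same values are simply ordered north to south.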
@@ -552,12 +647,24 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
wrt_int_state%lat_end = ubound(latPtr,2)
wrt_int_state%lon_start = lbound(lonPtr,1)
wrt_int_state%lon_end = ubound(lonPtr,1)
+ lon1(n) = wrt_int_state%lonstart
+ lon2(n) = wrt_int_state%lonlast
+ lat1(n) = wrt_int_state%latstart
+ lat2(n) = wrt_int_state%latlast
+ dlon(n) = delon
+ dlat(n) = delat
allocate( wrt_int_state%lat_start_wrtgrp(wrt_int_state%petcount))
allocate( wrt_int_state%lat_end_wrtgrp (wrt_int_state%petcount))
+ allocate( wrt_int_state%lon_start_wrtgrp(wrt_int_state%petcount))
+ allocate( wrt_int_state%lon_end_wrtgrp (wrt_int_state%petcount))
call mpi_allgather(wrt_int_state%lat_start,1,MPI_INTEGER, &
wrt_int_state%lat_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
call mpi_allgather(wrt_int_state%lat_end, 1,MPI_INTEGER, &
wrt_int_state%lat_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
+ call mpi_allgather(wrt_int_state%lon_start,1,MPI_INTEGER, &
+ wrt_int_state%lon_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
+ call mpi_allgather(wrt_int_state%lon_end, 1,MPI_INTEGER, &
+ wrt_int_state%lon_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
if( lprnt ) print *,'aft wrtgrd, latlon, dimj_start=',wrt_int_state%lat_start_wrtgrp, &
'dimj_end=',wrt_int_state%lat_end_wrtgrp, 'wrt_group=',n_group
allocate( wrt_int_state%latPtr(wrt_int_state%lon_start:wrt_int_state%lon_end, &
@@ -570,62 +677,64 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
wrt_int_state%lonPtr(i,j) = lonPtr(i,j)
enddo
enddo
- wrt_int_state%im = imo
- wrt_int_state%jm = jmo
+ wrt_int_state%im = imo(n)
+ wrt_int_state%jm = jmo(n)
wrt_int_state%post_maptype = 0
- else if ( trim(output_grid) == 'regional_latlon' .or. &
- trim(output_grid) == 'rotated_latlon' .or. &
- trim(output_grid) == 'lambert_conformal' ) then
+ deallocate(lat, lon)
- wrtgrid = ESMF_GridCreate1PeriDim(minIndex=(/1,1/), &
- maxIndex=(/imo,jmo/), regDecomp=(/1,ntasks/), &
- indexflag=ESMF_INDEX_GLOBAL, &
- name='wrt_grid',rc=rc)
+ else if ( trim(output_grid(n)) == 'regional_latlon' .or. &
+ trim(output_grid(n)) == 'rotated_latlon' .or. &
+ trim(output_grid(n)) == 'lambert_conformal' ) then
+
+ wrtGrid(n) = ESMF_GridCreateNoPeriDim(minIndex=(/1,1/), &
+ maxIndex=(/imo(n),jmo(n)/), regDecomp=(/itasks,jtasks/), &
+ indexflag=ESMF_INDEX_GLOBAL, &
+ name='wrt_grid',rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridAddCoord(wrtgrid, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc)
+ call ESMF_GridAddCoord(wrtGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridGetCoord(wrtgrid, coordDim=1, farrayPtr=lonPtr, rc=rc)
+ call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, farrayPtr=lonPtr, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridGetCoord(wrtgrid, coordDim=2, farrayPtr=latPtr, rc=rc)
+ call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, farrayPtr=latPtr, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- wrt_int_state%im = imo
- wrt_int_state%jm = jmo
- if ( trim(output_grid) == 'regional_latlon' ) then
+ wrt_int_state%im = imo(n)
+ wrt_int_state%jm = jmo(n)
+ if ( trim(output_grid(n)) == 'regional_latlon' ) then
do j=lbound(lonPtr,2),ubound(lonPtr,2)
do i=lbound(lonPtr,1),ubound(lonPtr,1)
- lonPtr(i,j) = lon1 + (lon2-lon1)/(imo-1) * (i-1)
- latPtr(i,j) = lat1 + (lat2-lat1)/(jmo-1) * (j-1)
+ lonPtr(i,j) = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (i-1)
+ latPtr(i,j) = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (j-1)
enddo
enddo
wrt_int_state%post_maptype = 0
- else if ( trim(output_grid) == 'rotated_latlon' ) then
+ else if ( trim(output_grid(n)) == 'rotated_latlon' ) then
do j=lbound(lonPtr,2),ubound(lonPtr,2)
do i=lbound(lonPtr,1),ubound(lonPtr,1)
- rot_lon = lon1 + (lon2-lon1)/(imo-1) * (i-1)
- rot_lat = lat1 + (lat2-lat1)/(jmo-1) * (j-1)
- call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon), dble(cen_lat))
+ rot_lon = lon1(n) + (lon2(n)-lon1(n))/(imo(n)-1) * (i-1)
+ rot_lat = lat1(n) + (lat2(n)-lat1(n))/(jmo(n)-1) * (j-1)
+ call rtll(rot_lon, rot_lat, geo_lon, geo_lat, dble(cen_lon(n)), dble(cen_lat(n)))
if (geo_lon < 0.0) geo_lon = geo_lon + 360.0
lonPtr(i,j) = geo_lon
latPtr(i,j) = geo_lat
enddo
enddo
wrt_int_state%post_maptype = 207
- else if ( trim(output_grid) == 'lambert_conformal' ) then
- lon1_r8 = dble(lon1)
- lat1_r8 = dble(lat1)
- call lambert(dble(stdlat1),dble(stdlat2),dble(cen_lat),dble(cen_lon), &
+ else if ( trim(output_grid(n)) == 'lambert_conformal' ) then
+ lon1_r8 = dble(lon1(n))
+ lat1_r8 = dble(lat1(n))
+ call lambert(dble(stdlat1(n)),dble(stdlat2(n)),dble(cen_lat(n)),dble(cen_lon(n)), &
lon1_r8,lat1_r8,x1,y1, 1)
do j=lbound(lonPtr,2),ubound(lonPtr,2)
do i=lbound(lonPtr,1),ubound(lonPtr,1)
- x = x1 + dx * (i-1)
- y = y1 + dy * (j-1)
- call lambert(dble(stdlat1),dble(stdlat2),dble(cen_lat),dble(cen_lon), &
+ x = x1 + dx(n) * (i-1)
+ y = y1 + dy(n) * (j-1)
+ call lambert(dble(stdlat1(n)),dble(stdlat2(n)),dble(cen_lat(n)),dble(cen_lon(n)), &
geo_lon,geo_lat,x,y,-1)
if (geo_lon <0.0) geo_lon = geo_lon + 360.0
lonPtr(i,j) = geo_lon
@@ -641,10 +750,16 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
wrt_int_state%lon_end = ubound(lonPtr,1)
allocate( wrt_int_state%lat_start_wrtgrp(wrt_int_state%petcount))
allocate( wrt_int_state%lat_end_wrtgrp (wrt_int_state%petcount))
+ allocate( wrt_int_state%lon_start_wrtgrp(wrt_int_state%petcount))
+ allocate( wrt_int_state%lon_end_wrtgrp (wrt_int_state%petcount))
call mpi_allgather(wrt_int_state%lat_start,1,MPI_INTEGER, &
- wrt_int_state%lat_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
+ wrt_int_state%lat_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
call mpi_allgather(wrt_int_state%lat_end, 1,MPI_INTEGER, &
wrt_int_state%lat_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
+ call mpi_allgather(wrt_int_state%lon_start,1,MPI_INTEGER, &
+ wrt_int_state%lon_start_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
+ call mpi_allgather(wrt_int_state%lon_end, 1,MPI_INTEGER, &
+ wrt_int_state%lon_end_wrtgrp, 1, MPI_INTEGER, wrt_mpi_comm, rc)
allocate( wrt_int_state%latPtr(wrt_int_state%lon_start:wrt_int_state%lon_end, &
wrt_int_state%lat_start:wrt_int_state%lat_end))
allocate( wrt_int_state%lonPtr(wrt_int_state%lon_start:wrt_int_state%lon_end, &
@@ -658,11 +773,13 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
else
- write(0,*)"wrt_initialize: Unknown output_grid ", trim(output_grid)
- call ESMF_LogWrite("wrt_initialize: Unknown output_grid "//trim(output_grid),ESMF_LOGMSG_ERROR,rc=RC)
+ write(0,*)"wrt_initialize: Unknown output_grid ", trim(output_grid(n))
+ call ESMF_LogWrite("wrt_initialize: Unknown output_grid "//trim(output_grid(n)),ESMF_LOGMSG_ERROR,rc=RC)
call ESMF_Finalize(endflag=ESMF_END_ABORT)
endif
+
+ end do ! n = 1, ngrids
!
!-----------------------------------------------------------------------
!*** get write grid component initial time from clock
@@ -686,17 +803,9 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
m=idate(5),s=idate(6),rc=rc)
wrt_int_state%idate = idate
change_wrtidate = .true.
- if (lprnt) print *,'in wrt initial, with iau, io_baseline time=',idate,'rc=',rc
+ if (lprnt) print *,'in wrt initial, with iau, io_baseline time=',idate,'rc=',rc
endif
!
-! Create field bundle
-!-------------------------------------------------------------------
-!
-!--- check grid dim count first
- call ESMF_GridGet(wrtgrid, dimCount=gridDimCount, rc=rc)
-
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!
!--- Look at the incoming FieldBundles in the imp_state_write, and mirror them
!
call ESMF_StateGet(imp_state_write, itemCount=FBCount, rc=rc)
@@ -716,12 +825,14 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
outfilename = ''
call ESMF_StateGet(imp_state_write, itemNameList=fcstItemNameList, &
- itemTypeList=fcstItemTypeList, rc=rc)
+ itemTypeList=fcstItemTypeList, &
+ !itemorderflag=ESMF_ITEMORDER_ADDORDER, &
+ rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!loop over all items in the imp_state_write and collect all FieldBundles
- do i=1, FBcount
+ do i=1, FBCount
if (fcstItemTypeList(i) == ESMF_STATEITEM_FIELDBUNDLE) then
@@ -730,6 +841,15 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_AttributeGet(fcstFB, convention="NetCDF", purpose="FV3", &
+ name="grid_id", value=grid_id, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+!--- check grid dim count first
+ call ESMF_GridGet(wrtGrid(grid_id), dimCount=gridDimCount, rc=rc)
+
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
! create a mirror FieldBundle and add it to importState
fieldbundle = ESMF_FieldBundleCreate(name="mirror_"//trim(fcstItemNameList(i)), rc=rc)
@@ -772,7 +892,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
call ESMF_FieldGet(fcstField(j), gridToFieldMap=gridToFieldMap, &
ungriddedLBound=ungriddedLBound, ungriddedUBound=ungriddedUBound, &
rc=rc)
- CALL ESMF_LogWrite("after field create on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("after field create on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
! if (lprnt) print *,'in wrt,fcstfld,fieldname=', &
! trim(fieldname),'fieldDimCount=',fieldDimCount,'gridDimCount=',gridDimCount, &
@@ -783,12 +903,12 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
! create the mirror field
- CALL ESMF_LogWrite("call field create on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
- field_work = ESMF_FieldCreate(wrtGrid, typekind, name=fieldName, &
+ call ESMF_LogWrite("call field create on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
+ field_work = ESMF_FieldCreate(wrtGrid(grid_id), typekind, name=fieldName, &
gridToFieldMap=gridToFieldMap, &
ungriddedLBound=ungriddedLBound, &
ungriddedUBound=ungriddedUBound, rc=rc)
- CALL ESMF_LogWrite("aft call field create on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("aft call field create on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -803,11 +923,11 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
name="output_file", value=outfile_name, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- CALL ESMF_LogWrite("bf fcstfield, get output_file "//trim(outfile_name)//" "//trim(fieldName),ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("bf fcstfield, get output_file "//trim(outfile_name)//" "//trim(fieldName),ESMF_LOGMSG_INFO,rc=RC)
if (trim(outfile_name) /= '') then
outfilename(j,i) = trim(outfile_name)
endif
- CALL ESMF_LogWrite("af fcstfield, get output_file",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("af fcstfield, get output_file",ESMF_LOGMSG_INFO,rc=RC)
! if (lprnt) print *,' i=',i,' j=',j,' outfilename=',trim(outfilename(j,i))
@@ -820,7 +940,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
deallocate(gridToFieldMap, ungriddedLBound, ungriddedUBound)
enddo
!
- call ESMF_AttributeCopy(fcstGrid, wrtGrid, &
+ call ESMF_AttributeCopy(fcstGrid, wrtGrid(grid_id), &
attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -845,7 +965,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
!
!create output field bundles
- allocate(wrt_int_state%wrtFB(num_files))
+ allocate(wrt_int_state%wrtFB(wrt_int_state%FBcount))
do i=1, wrt_int_state%FBcount
wrt_int_state%wrtFB_names(i) = trim(FBlist_outfilename(i))
@@ -858,13 +978,17 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
call ESMF_StateGet(imp_state_write, itemName="mirror_"//trim(fcstItemNameList(n)), &
fieldbundle=fcstFB, rc=rc)
- if( index(trim(fcstItemNameList(n)),trim(FBlist_outfilename(i))) > 0 ) then
+ if( index(trim(fcstItemNameList(n)),trim(FBlist_outfilename(i))) == 1 ) then
!
! copy the mirror fcstfield bundle Attributes to the output field bundle
call ESMF_AttributeCopy(fcstFB, wrt_int_state%wrtFB(i), &
attcopy=ESMF_ATTCOPY_REFERENCE, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_AttributeGet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
+ name="grid_id", value=grid_id, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
call ESMF_FieldBundleGet(fcstFB, fieldCount=fieldCount, rc=rc)
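The index(...) == 1 test in the hunk above tightens the old "> 0" substring match to a prefix match, so a mirror bundle is only attached to an output file whose name it actually starts with. Fortran's index(string, substring) returns the position of the first occurrence, or 0 if absent; a tiny sketch (the bundle names below are made up):

    program prefix_match_sketch
      implicit none
      ! old test (> 0): both lines below would "match" the outfile name 'dyn'
      ! new test (== 1): only a true prefix matches
      print *, index('restart_dyn', 'dyn')   ! prints 9 -> substring, not a prefix
      print *, index('dyn_coarse',  'dyn')   ! prints 1 -> true prefix
    end program prefix_match_sketch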
@@ -905,57 +1029,43 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
name="source", value="FV3GFS", rc=rc)
- if (trim(output_grid) == 'cubed_sphere_grid') then
+ if (trim(output_grid(grid_id)) == 'cubed_sphere_grid') then
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
name="grid", value="cubed_sphere", rc=rc)
- else if (trim(output_grid) == 'gaussian_grid') then
+ else if (trim(output_grid(grid_id)) == 'gaussian_grid') then
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
name="grid", value="gaussian", rc=rc)
call ESMF_AttributeAdd(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
attrList=(/"im","jm"/), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="im", value=imo, rc=rc)
- call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="jm", value=jmo, rc=rc)
-
- else if (trim(output_grid) == 'global_latlon') then
-
- call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="grid", value="latlon", rc=rc)
- call ESMF_AttributeAdd(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- attrList=(/"lonstart","latstart","lonlast ","latlast "/), rc=rc)
- call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lonstart", value=wrt_int_state%lonstart, rc=rc)
+ name="im", value=imo(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="latstart", value=wrt_int_state%latstart, rc=rc)
- call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lonlast", value=wrt_int_state%lonlast, rc=rc)
- call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="latlast", value=wrt_int_state%latlast, rc=rc)
+ name="jm", value=jmo(grid_id), rc=rc)
- else if (trim(output_grid) == 'regional_latlon') then
+ else if (trim(output_grid(grid_id)) == 'regional_latlon' &
+ .or. trim(output_grid(grid_id)) == 'global_latlon') then
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
name="grid", value="latlon", rc=rc)
call ESMF_AttributeAdd(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
attrList=(/"lon1","lat1","lon2","lat2","dlon","dlat"/), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lon1", value=lon1, rc=rc)
+ name="lon1", value=lon1(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lat1", value=lat1, rc=rc)
+ name="lat1", value=lat1(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lon2", value=lon2, rc=rc)
+ name="lon2", value=lon2(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lat2", value=lat2, rc=rc)
+ name="lat2", value=lat2(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="dlon", value=dlon, rc=rc)
+ name="dlon", value=dlon(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="dlat", value=dlat, rc=rc)
+ name="dlat", value=dlat(grid_id), rc=rc)
- else if (trim(output_grid) == 'rotated_latlon') then
+ else if (trim(output_grid(grid_id)) == 'rotated_latlon') then
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
name="grid", value="rotated_latlon", rc=rc)
@@ -969,23 +1079,23 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
"dlon ",&
"dlat "/), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="cen_lon", value=cen_lon, rc=rc)
+ name="cen_lon", value=cen_lon(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="cen_lat", value=cen_lat, rc=rc)
+ name="cen_lat", value=cen_lat(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lon1", value=lon1, rc=rc)
+ name="lon1", value=lon1(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lat1", value=lat1, rc=rc)
+ name="lat1", value=lat1(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lon2", value=lon2, rc=rc)
+ name="lon2", value=lon2(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lat2", value=lat2, rc=rc)
+ name="lat2", value=lat2(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="dlon", value=dlon, rc=rc)
+ name="dlon", value=dlon(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="dlat", value=dlat, rc=rc)
+ name="dlat", value=dlat(grid_id), rc=rc)
- else if (trim(output_grid) == 'lambert_conformal') then
+ else if (trim(output_grid(grid_id)) == 'lambert_conformal') then
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
name="grid", value="lambert_conformal", rc=rc)
@@ -1001,25 +1111,25 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
"dx ",&
"dy "/), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="cen_lon", value=cen_lon, rc=rc)
+ name="cen_lon", value=cen_lon(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="cen_lat", value=cen_lat, rc=rc)
+ name="cen_lat", value=cen_lat(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="stdlat1", value=stdlat1, rc=rc)
+ name="stdlat1", value=stdlat1(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="stdlat2", value=stdlat2, rc=rc)
+ name="stdlat2", value=stdlat2(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="nx", value=imo, rc=rc)
+ name="nx", value=imo(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="ny", value=jmo, rc=rc)
+ name="ny", value=jmo(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lat1", value=lat1, rc=rc)
+ name="lat1", value=lat1(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="lon1", value=lon1, rc=rc)
+ name="lon1", value=lon1(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="dx", value=dx, rc=rc)
+ name="dx", value=dx(grid_id), rc=rc)
call ESMF_AttributeSet(wrt_int_state%wrtFB(i), convention="NetCDF", purpose="FV3", &
- name="dy", value=dy, rc=rc)
+ name="dy", value=dy(grid_id), rc=rc)
end if
@@ -1066,8 +1176,10 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
endif
enddo
+
+ do n = 1, ngrids
! add the transfer attributes from importState to grid
- call ESMF_AttributeAdd(wrtgrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_AttributeAdd(wrtGrid(n), convention="NetCDF", purpose="FV3", &
attrList=attNameList(1:j-1), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -1093,7 +1205,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if(lprnt) print *,'in write grid comp, new time:unit=',trim(valueS)
endif
endif
- call ESMF_AttributeSet(wrtgrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", &
name=trim(attNameList(i)), value=valueS, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -1105,7 +1217,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_AttributeSet(wrtgrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", &
name=trim(attNameList(i)), value=valueI4, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -1117,7 +1229,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_AttributeSet(wrtgrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", &
name=trim(attNameList(i)), value=valueR4, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -1129,7 +1241,7 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_AttributeSet(wrtgrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", &
name=trim(attNameList(i)), value=valueR8, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -1138,17 +1250,16 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
! Add special attribute that holds names of "time" related attributes
! for faster access during Run().
- call ESMF_AttributeAdd(wrtgrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_AttributeAdd(wrtGrid(n), convention="NetCDF", purpose="FV3", &
attrList=(/"TimeAttributes"/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_AttributeSet(wrtgrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_AttributeSet(wrtGrid(n), convention="NetCDF", purpose="FV3", &
name="TimeAttributes", valueList=attNameList2(1:k-1), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- deallocate(attNameList, attNameList2, typekindList)
!
!*** create temporary field bundle for axes information
@@ -1158,20 +1269,14 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_AttributeGet(wrtGrid, convention="NetCDF", purpose="FV3", &
- name="ESMF:gridded_dim_labels", valueList=attrValueSList, rc=rc)
-
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_GridGetCoord(wrtGrid, coordDim=1, &
+ call ESMF_GridGetCoord(wrtGrid(n), coordDim=1, &
staggerloc=ESMF_STAGGERLOC_CENTER, array=array, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-! write(0,*) 'create gridFB,fieldname=',trim(attrValueSList(1)),trim(attrValueSList(2)), &
-! 'lon value=',array(1:5)
-
- field = ESMF_FieldCreate(wrtGrid, array, name=trim(attrValueSList(1)), rc=rc)
+ field = ESMF_FieldCreate(wrtGrid(n), array, name="grid_xt", rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
@@ -1213,15 +1318,12 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
! get 2nd dimension
- call ESMF_GridGetCoord(wrtGrid, coordDim=2, &
+ call ESMF_GridGetCoord(wrtGrid(n), coordDim=2, &
staggerloc=ESMF_STAGGERLOC_CENTER, array=array, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-! write(0,*) 'create gridFB,fieldname=',trim(attrValueSList(1)),trim(attrValueSList(2)), &
-! 'lat value=',array(1:5,1),array(1,1:5)
-
- field = ESMF_FieldCreate(wrtGrid, array, name=trim(attrValueSList(2)), rc=rc)
+ field = ESMF_FieldCreate(wrtGrid(n), array, name="grid_yt", rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!add attribute info
@@ -1260,6 +1362,10 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
call ESMF_FieldBundleAdd(gridFB, (/field/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ end do ! n=1, ngrids
+
+ deallocate(attNameList, attNameList2, typekindList)
!
!-----------------------------------------------------------------------
!*** SET THE FIRST HISTORY FILE'S TIME INDEX.
@@ -1274,17 +1380,9 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
call ESMF_LogWrite("before initialize for POST", ESMF_LOGMSG_INFO, rc=rc)
if (lprnt) print *,'in wrt grid comp, dopost=',wrt_int_state%write_dopost
if( wrt_int_state%write_dopost ) then
- call inline_post_getattr(wrt_int_state)
+ call inline_post_getattr(wrt_int_state,1)
endif
!
-!-----------------------------------------------------------------------
-!
- IF(RC /= ESMF_SUCCESS) THEN
- WRITE(0,*)"FAIL: Write_Initialize."
-! ELSE
-! WRITE(0,*)"PASS: Write_Initialize."
- ENDIF
-!
! write_init_tim = MPI_Wtime() - btim0
!
!-----------------------------------------------------------------------
@@ -1312,9 +1410,7 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
TYPE(ESMF_VM) :: VM
type(ESMF_FieldBundle) :: file_bundle
type(ESMF_Time) :: currtime
- type(ESMF_TypeKind_Flag) :: datatype
- type(ESMF_Field) :: field_work
- type(ESMF_Grid) :: fbgrid, wrtgrid
+ type(ESMF_Grid) :: fbgrid, wrtGrid
type(ESMF_State),save :: stateGridFB
type(optimizeT), save :: optimize(4)
type(ESMF_GridComp), save, allocatable :: compsGridFB(:)
@@ -1322,34 +1418,27 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
type(write_wrap) :: wrap
type(wrt_internal_state),pointer :: wrt_int_state
!
- integer :: i,j,n,mype,nolog
+ integer :: i,j,n,mype,nolog, grid_id
!
integer :: nf_hours,nf_seconds, nf_minutes, &
nseconds,nseconds_num,nseconds_den
!
- integer :: id
- integer :: nbdl, idx, date(6), ndig
+ integer :: nbdl, date(6), ndig, nnnn
integer :: step=1
!
logical :: opened
logical :: lmask_fields
- logical,save :: first=.true.
- logical,save :: file_first=.true.
!
- character(esmf_maxstr) :: filename,compname,bundle_name
+ character(esmf_maxstr) :: filename,compname
character(40) :: cfhour, cform
real(ESMF_KIND_R8) :: time
!
- real(kind=8) :: wait_time, MPI_Wtime
- real(kind=8) :: times,times2,etim
- character(10) :: timeb
- real(kind=8) :: tbeg,tend
+ real(kind=8) :: MPI_Wtime
+ real(kind=8) :: tbeg
real(kind=8) :: wbeg,wend
- real(kind=ESMF_KIND_R8), dimension(:,:,:), pointer :: datar8
- real(kind=ESMF_KIND_R8), dimension(:,:), pointer :: datar82d
-!
- logical lprnt
+ logical :: use_parallel_netcdf
+ logical :: lprnt
!
!-----------------------------------------------------------------------
!***********************************************************************
@@ -1359,16 +1448,11 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
rc = esmf_success
!
!-----------------------------------------------------------------------
-!*** get the current write grid comp name, id, and internal state
+!*** get the current write grid comp name and internal state
!
call ESMF_GridCompGet(wrt_comp, name=compname, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-! print *,'in wrt run. compname=',trim(compname),' rc=',rc
-
-! instance id from name
- read(compname(10:11),"(I2)") id
-
! Provide log message indicating which wrtComp is active
call ESMF_LogWrite("Write component activated: "//trim(compname), &
ESMF_LOGMSG_INFO, rc=rc)
@@ -1424,7 +1508,7 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
nf_seconds = nf_hours*3600+nf_minuteS*60+nseconds+real(nseconds_num)/real(nseconds_den)
wrt_int_state%nfhour = nf_seconds/3600.
nf_hours = int(nf_seconds/3600.)
- if(mype == lead_write_task) print *,'in write grid comp, nf_hours=',nf_hours
+ if(lprnt) print *,'in write grid comp, nf_hours=',nf_hours
! if iau_offset > nf_hours, don't write out anything
if (nf_hours < 0) return
@@ -1448,25 +1532,20 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
! ' nf_seconds=',nf_seconds,wrt_int_state%nfhour
! access the time Attribute which is updated by the driver each time
- call ESMF_LogWrite("before Write component get time", ESMF_LOGMSG_INFO, rc=rc)
call ESMF_AttributeGet(imp_state_write, convention="NetCDF", purpose="FV3", &
name="time", value=time, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMF_LogWrite("before Write component af get time", ESMF_LOGMSG_INFO, rc=rc)
!
!-----------------------------------------------------------------------
!*** loop on the files that need to write out
!-----------------------------------------------------------------------
do i=1, FBCount
- call ESMF_LogWrite("before Write component get mirror file bundle", ESMF_LOGMSG_INFO, rc=rc)
call ESMF_StateGet(imp_state_write, itemName="mirror_"//trim(fcstItemNameList(i)), &
fieldbundle=file_bundle, rc=rc)
-
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_LogWrite("before Write component af get mirror file bundle", ESMF_LOGMSG_INFO, rc=rc)
+
!recover fields from cartesian vector and sfc pressure
call recover_fields(file_bundle,rc)
enddo
@@ -1478,26 +1557,25 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
if( wrt_int_state%write_dopost ) then
!
wbeg = MPI_Wtime()
- if (trim(output_grid) == 'regional_latlon' .or. &
- trim(output_grid) == 'rotated_latlon' .or. &
- trim(output_grid) == 'lambert_conformal') then
+ if (trim(output_grid(1)) == 'regional_latlon' .or. &
+ trim(output_grid(1)) == 'rotated_latlon' .or. &
+ trim(output_grid(1)) == 'lambert_conformal') then
!mask fields according to sfc pressure
do nbdl=1, wrt_int_state%FBCount
- call ESMF_LogWrite("before mask_fields for wrt field bundle", ESMF_LOGMSG_INFO, rc=rc)
call mask_fields(wrt_int_state%wrtFB(nbdl),rc)
- call ESMF_LogWrite("after mask_fields for wrt field bundle", ESMF_LOGMSG_INFO, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
enddo
lmask_fields = .true.
endif
- call inline_post_run(wrt_int_state, mype, wrt_mpi_comm, lead_write_task, &
+ call inline_post_run(wrt_int_state, 1, mype, wrt_mpi_comm, lead_write_task, &
nf_hours, nf_minutes,nseconds)
wend = MPI_Wtime()
if (lprnt) then
write(*,'(A,F10.5,A,I4.2,A,I2.2)')' actual inline post Time is ',wend-wbeg &
,' at Fcst ',nf_hours,':',nf_minutes
- endif
+ endif
endif
!
@@ -1509,52 +1587,60 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
file_loop_all: do nbdl=1, wrt_int_state%FBCount
!
+ ! get grid_id
+ call ESMF_AttributeGet(wrt_int_state%wrtFB(nbdl), convention="NetCDF", purpose="FV3", &
+ name="grid_id", value=grid_id, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
if(step == 1) then
file_bundle = wrt_int_state%wrtFB(nbdl)
endif
+ ! FIXME map nbdl to [1:num_files], only used for output_file
+ nnnn = mod(nbdl-1, num_files) + 1
+
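! Illustrative sketch (hypothetical values, not from this patch): the mod()
! mapping above cycles the field-bundle index nbdl through the num_files
! entries of output_file.
program map_bundle_to_file
  implicit none
  integer, parameter :: num_files = 2    ! e.g. one 'atm' and one 'sfc' entry
  integer :: nbdl, nnnn
  do nbdl = 1, 6                         ! e.g. 3 grids x 2 file bundles
    nnnn = mod(nbdl-1, num_files) + 1    ! yields 1,2,1,2,1,2
    print '(A,I0,A,I0)', 'bundle ', nbdl, ' maps to output_file index ', nnnn
  end do
end program map_bundle_to_file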
! set default chunksizes for netcdf output
! (use MPI decomposition size).
! if chunksize parameter set to negative value,
! netcdf library default is used.
- if (output_file(nbdl)(1:6) == 'netcdf') then
- if (ichunk2d == 0) then
+ if (output_file(nnnn)(1:6) == 'netcdf') then
+ if (ichunk2d(grid_id) == 0) then
if( wrt_int_state%mype == 0 ) &
- ichunk2d = wrt_int_state%lon_end-wrt_int_state%lon_start+1
- call mpi_bcast(ichunk2d,1,mpi_integer,0,wrt_mpi_comm,rc)
+ ichunk2d(grid_id) = wrt_int_state%lon_end-wrt_int_state%lon_start+1
+ call mpi_bcast(ichunk2d(grid_id),1,mpi_integer,0,wrt_mpi_comm,rc)
endif
- if (jchunk2d == 0) then
+ if (jchunk2d(grid_id) == 0) then
if( wrt_int_state%mype == 0 ) &
- jchunk2d = wrt_int_state%lat_end-wrt_int_state%lat_start+1
- call mpi_bcast(jchunk2d,1,mpi_integer,0,wrt_mpi_comm,rc)
+ jchunk2d(grid_id) = wrt_int_state%lat_end-wrt_int_state%lat_start+1
+ call mpi_bcast(jchunk2d(grid_id),1,mpi_integer,0,wrt_mpi_comm,rc)
endif
- if (ichunk3d == 0) then
+ if (ichunk3d(grid_id) == 0) then
if( wrt_int_state%mype == 0 ) &
- ichunk3d = wrt_int_state%lon_end-wrt_int_state%lon_start+1
- call mpi_bcast(ichunk3d,1,mpi_integer,0,wrt_mpi_comm,rc)
+ ichunk3d(grid_id) = wrt_int_state%lon_end-wrt_int_state%lon_start+1
+ call mpi_bcast(ichunk3d(grid_id),1,mpi_integer,0,wrt_mpi_comm,rc)
endif
- if (jchunk3d == 0) then
+ if (jchunk3d(grid_id) == 0) then
if( wrt_int_state%mype == 0 ) &
- jchunk3d = wrt_int_state%lat_end-wrt_int_state%lat_start+1
- call mpi_bcast(jchunk3d,1,mpi_integer,0,wrt_mpi_comm,rc)
+ jchunk3d(grid_id) = wrt_int_state%lat_end-wrt_int_state%lat_start+1
+ call mpi_bcast(jchunk3d(grid_id),1,mpi_integer,0,wrt_mpi_comm,rc)
endif
- if (kchunk3d == 0 .and. nbdl == 1) then
+ if (kchunk3d(grid_id) == 0 .and. nbdl == 1) then
if( wrt_int_state%mype == 0 ) then
- call ESMF_FieldBundleGet(wrt_int_state%wrtFB(nbdl), grid=wrtgrid)
- call ESMF_AttributeGet(wrtgrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_FieldBundleGet(wrt_int_state%wrtFB(nbdl), grid=wrtGrid)
+ call ESMF_AttributeGet(wrtGrid, convention="NetCDF", purpose="FV3", &
attnestflag=ESMF_ATTNEST_OFF, name='pfull', &
- itemCount=kchunk3d, rc=rc)
+ itemCount=kchunk3d(grid_id), rc=rc)
endif
- call mpi_bcast(kchunk3d,1,mpi_integer,0,wrt_mpi_comm,rc)
+ call mpi_bcast(kchunk3d(grid_id),1,mpi_integer,0,wrt_mpi_comm,rc)
endif
if (wrt_int_state%mype == 0) then
- print *,'ichunk2d,jchunk2d',ichunk2d,jchunk2d
- print *,'ichunk3d,jchunk3d,kchunk3d',ichunk3d,jchunk3d,kchunk3d
+ print *,'ichunk2d,jchunk2d',ichunk2d(grid_id),jchunk2d(grid_id)
+ print *,'ichunk3d,jchunk3d,kchunk3d',ichunk3d(grid_id),jchunk3d(grid_id),kchunk3d(grid_id)
endif
endif
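! Illustrative sketch (assumes an MPI build with the mpi module; names and
! extents are hypothetical): the chunk-size defaulting above lets write task 0
! derive a default from its own subdomain extent and broadcast it to the other
! write tasks, as in this minimal standalone version.
program default_chunk_bcast
  use mpi
  implicit none
  integer :: ierr, myrank, ichunk2d_default, lon_start, lon_end
  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, myrank, ierr)
  ichunk2d_default = 0
  if (myrank == 0) then
    lon_start = 1                        ! stand-ins for this task's i-extent
    lon_end   = 96
    ichunk2d_default = lon_end - lon_start + 1
  end if
  call MPI_Bcast(ichunk2d_default, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
  print '(A,I0,A,I0)', 'rank ', myrank, ' uses i-chunksize ', ichunk2d_default
  call MPI_Finalize(ierr)
end program default_chunk_bcast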
filename = trim(wrt_int_state%wrtFB_names(nbdl))//'f'//trim(cfhour)//'.nc'
-! if(mype == lead_write_task) print *,'in wrt run,filename=',trim(filename)
+ if(mype == lead_write_task) print *,'in wrt run,filename= ',nbdl,trim(filename)
!
! set the time Attribute on the grid to carry it into the lower levels
@@ -1573,183 +1659,86 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if (trim(output_grid) == 'cubed_sphere_grid') then
-
- wbeg = MPI_Wtime()
- call ESMFproto_FieldBundleWrite(gridFB, filename=trim(filename), &
- convention="NetCDF", purpose="FV3", &
- status=ESMF_FILESTATUS_REPLACE, &
- state=stateGridFB, comps=compsGridFB,rc=rc)
-
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- call ESMFproto_FieldBundleWrite(wrt_int_state%wrtFB(nbdl), &
- filename=trim(filename), convention="NetCDF", &
- purpose="FV3", status=ESMF_FILESTATUS_OLD, &
- timeslice=step, state=optimize(nbdl)%state, &
- comps=optimize(nbdl)%comps, rc=rc)
-
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-
- wend = MPI_Wtime()
- if (lprnt) then
- write(*,'(A,F10.5,A,I4.2,A,I2.2)')' actual netcdf Write Time is ',wend-wbeg &
- ,' at Fcst ',NF_HOURS,':',NF_MINUTES
- endif
-
- else if (trim(output_grid) == 'gaussian_grid') then
-
- if (trim(output_file(nbdl)) == 'netcdf') then
-
- wbeg = MPI_Wtime()
- call write_netcdf(file_bundle,wrt_int_state%wrtFB(nbdl),trim(filename), &
- wrt_mpi_comm,wrt_int_state%mype,imo,jmo,&
- ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc)
- wend = MPI_Wtime()
- if (lprnt) then
- write(*,'(A,F10.5,A,I4.2,A,I2.2)')' netcdf Write Time is ',wend-wbeg &
- ,' at Fcst ',NF_HOURS,':',NF_MINUTES
- endif
-
- else if (trim(output_file(nbdl)) == 'netcdf_parallel') then
-
-#ifdef NO_PARALLEL_NETCDF
- rc = ESMF_RC_NOT_IMPL
- print *,'netcdf_parallel not available on this machine'
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) return
-#endif
- wbeg = MPI_Wtime()
- call write_netcdf_parallel(file_bundle,wrt_int_state%wrtFB(nbdl), &
- trim(filename), wrt_mpi_comm,wrt_int_state%mype,imo,jmo,&
- ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc)
- wend = MPI_Wtime()
- if (lprnt) then
- write(*,'(A,F10.5,A,I4.2,A,I2.2)')' parallel netcdf Write Time is ',wend-wbeg &
- ,' at Fcst ',NF_HOURS,':',NF_MINUTES
- endif
+ if (trim(output_file(nnnn)) == 'netcdf') then
+ use_parallel_netcdf = .false.
+ else if (trim(output_file(nnnn)) == 'netcdf_parallel') then
+ use_parallel_netcdf = .true.
+ else
+ call ESMF_LogWrite("wrt_run: Unknown output_file",ESMF_LOGMSG_ERROR,rc=RC)
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ endif
- else if (trim(output_file(nbdl)) == 'netcdf_esmf') then
+ if (trim(output_grid(grid_id)) == 'cubed_sphere_grid') then
- wbeg = MPI_Wtime()
- call ESMFproto_FieldBundleWrite(gridFB, filename=trim(filename), &
- convention="NetCDF", purpose="FV3", &
- status=ESMF_FILESTATUS_REPLACE, state=stateGridFB, comps=compsGridFB,rc=rc)
+ wbeg = MPI_Wtime()
+ if (trim(output_file(nnnn)) == 'netcdf_parallel') then
+ call write_netcdf(wrt_int_state%wrtFB(nbdl),trim(filename), &
+ .true., wrt_mpi_comm,wrt_int_state%mype, &
+ grid_id,rc)
+ else
+ call ESMFproto_FieldBundleWrite(gridFB, filename=trim(filename), &
+ convention="NetCDF", purpose="FV3", &
+ status=ESMF_FILESTATUS_REPLACE, &
+ state=stateGridFB, comps=compsGridFB,rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMFproto_FieldBundleWrite(wrt_int_state%wrtFB(nbdl), &
- filename=trim(filename), convention="NetCDF", &
- purpose="FV3", status=ESMF_FILESTATUS_OLD, &
- timeslice=step, state=optimize(nbdl)%state, &
+ call ESMFproto_FieldBundleWrite(wrt_int_state%wrtFB(nbdl), &
+ filename=trim(filename), convention="NetCDF", &
+ purpose="FV3", status=ESMF_FILESTATUS_OLD, &
+ timeslice=step, state=optimize(nbdl)%state, &
comps=optimize(nbdl)%comps, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- wend = MPI_Wtime()
- if (lprnt) then
- write(*,'(A,F10.5,A,I4.2,A,I2.2)')' netcdf_esmf Write Time is ',wend-wbeg &
- ,' at Fcst ',NF_HOURS,':',NF_MINUTES
- endif
+ end if
+ wend = MPI_Wtime()
+ if (lprnt) then
+ write(*,'(A15,A,F10.5,A,I4.2,A,I2.2,1X,A)')trim(output_file(nnnn)),' Write Time is ',wend-wbeg &
+ ,' at Fcst ',NF_HOURS,':',NF_MINUTES
endif
- else if (trim(output_grid) == 'global_latlon') then
-
- if (trim(output_file(nbdl)) == 'netcdf') then
-
- wbeg = MPI_Wtime()
- call write_netcdf(file_bundle,wrt_int_state%wrtFB(nbdl),trim(filename), &
- wrt_mpi_comm,wrt_int_state%mype,imo,jmo,&
- ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc)
- wend = MPI_Wtime()
- if (lprnt) then
- write(*,'(A,F10.5,A,I4.2,A,I2.2)')' netcdf Write Time is ',wend-wbeg &
- ,' at Fcst ',NF_HOURS,':',NF_MINUTES
- endif
-
- else if (trim(output_file(nbdl)) == 'netcdf_parallel') then
-
-#ifdef NO_PARALLEL_NETCDF
- rc = ESMF_RC_NOT_IMPL
- print *,'netcdf_parallel not available on this machine'
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) return
-#endif
- wbeg = MPI_Wtime()
- call write_netcdf_parallel(file_bundle,wrt_int_state%wrtFB(nbdl), &
- trim(filename), wrt_mpi_comm,wrt_int_state%mype,imo,jmo,&
- ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc)
- wend = MPI_Wtime()
- if (lprnt) then
- write(*,'(A,F10.5,A,I4.2,A,I2.2)')' parallel netcdf Write Time is ',wend-wbeg &
- ,' at Fcst ',NF_HOURS,':',NF_MINUTES
- endif
-
- else ! unknown output_file
-
- call ESMF_LogWrite("wrt_run: Unknown output_file",ESMF_LOGMSG_ERROR,rc=RC)
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ else if (trim(output_grid(grid_id)) == 'gaussian_grid' .or. &
+ trim(output_grid(grid_id)) == 'global_latlon') then
+ wbeg = MPI_Wtime()
+ call write_netcdf(wrt_int_state%wrtFB(nbdl),trim(filename), &
+ use_parallel_netcdf, wrt_mpi_comm,wrt_int_state%mype, &
+ grid_id,rc)
+ wend = MPI_Wtime()
+ if (lprnt) then
+ write(*,'(A15,A,F10.5,A,I4.2,A,I2.2,1X,A)')trim(output_file(nnnn)),' Write Time is ',wend-wbeg &
+ ,' at Fcst ',NF_HOURS,':',NF_MINUTES
endif
- else if (trim(output_grid) == 'regional_latlon' .or. &
- trim(output_grid) == 'rotated_latlon' .or. &
- trim(output_grid) == 'lambert_conformal') then
+ else if (trim(output_grid(grid_id)) == 'regional_latlon' .or. &
+ trim(output_grid(grid_id)) == 'rotated_latlon' .or. &
+ trim(output_grid(grid_id)) == 'lambert_conformal') then
!mask fields according to sfc pressure
- !if (mype == lead_write_task) print *,'before mask_fields'
if( .not. lmask_fields ) then
wbeg = MPI_Wtime()
- call ESMF_LogWrite("before mask_fields for wrt field bundle", ESMF_LOGMSG_INFO, rc=rc)
- !call mask_fields(wrt_int_state%wrtFB(nbdl),rc)
call mask_fields(file_bundle,rc)
- !if (mype == lead_write_task) print *,'after mask_fields'
- call ESMF_LogWrite("after mask_fields for wrt field bundle", ESMF_LOGMSG_INFO, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
wend = MPI_Wtime()
- if (mype == lead_write_task) then
+ if (lprnt) then
write(*,'(A,F10.5,A,I4.2,A,I2.2)')' mask_fields time is ',wend-wbeg
endif
endif
- if (trim(output_file(nbdl)) == 'netcdf' .and. nbits==0) then
-
- wbeg = MPI_Wtime()
- call write_netcdf(file_bundle,wrt_int_state%wrtFB(nbdl),trim(filename), &
- wrt_mpi_comm,wrt_int_state%mype,imo,jmo,&
- ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc)
- wend = MPI_Wtime()
- if (mype == lead_write_task) then
- write(*,'(A,F10.5,A,I4.2,A,I2.2)')' netcdf Write Time is ',wend-wbeg &
- ,' at Fcst ',NF_HOURS,':',NF_MINUTES
- endif
-
- else if (trim(output_file(nbdl)) == 'netcdf_parallel' .and. nbits==0) then
-
-#ifdef NO_PARALLEL_NETCDF
- rc = ESMF_RC_NOT_IMPL
- print *,'netcdf_parallel not available on this machine'
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, file=__FILE__)) return
-#endif
- wbeg = MPI_Wtime()
- call write_netcdf_parallel(file_bundle,wrt_int_state%wrtFB(nbdl), &
- trim(filename), wrt_mpi_comm,wrt_int_state%mype,imo,jmo,&
- ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc)
- wend = MPI_Wtime()
- if (lprnt) then
- write(*,'(A,F10.5,A,I4.2,A,I2.2)')' parallel netcdf Write Time is ',wend-wbeg &
- ,' at Fcst ',NF_HOURS,':',NF_MINUTES
- endif
- else ! unknown output_file
-
- if( nbits /= 0) then
- call ESMF_LogWrite("wrt_run: lossy compression is not supported for regional grids",ESMF_LOGMSG_ERROR,rc=RC)
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
- else
- call ESMF_LogWrite("wrt_run: Unknown output_file",ESMF_LOGMSG_ERROR,rc=RC)
- call ESMF_Finalize(endflag=ESMF_END_ABORT)
- endif
+ if (nbits(grid_id) /= 0) then
+ call ESMF_LogWrite("wrt_run: lossy compression is not supported for regional grids",ESMF_LOGMSG_ERROR,rc=RC)
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ end if
+ wbeg = MPI_Wtime()
+ call write_netcdf(wrt_int_state%wrtFB(nbdl),trim(filename), &
+ use_parallel_netcdf, wrt_mpi_comm,wrt_int_state%mype, &
+ grid_id,rc)
+ wend = MPI_Wtime()
+ if (lprnt) then
+ write(*,'(A15,A,F10.5,A,I4.2,A,I2.2,1X,A)')trim(output_file(nnnn)),' Write Time is ',wend-wbeg &
+ ,' at Fcst ',NF_HOURS,':',NF_MINUTES
endif
else ! unknown output_grid
@@ -1766,7 +1755,7 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
!
!** write out log file
!
- if(mype == lead_write_task) then
+ if (mype == lead_write_task) then
do n=701,900
inquire(n,opened=OPENED)
if(.not.opened)then
@@ -1785,6 +1774,7 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
!-----------------------------------------------------------------------
!
call ESMF_VMBarrier(VM, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
write_run_tim = MPI_Wtime() - tbeg
!
@@ -1792,12 +1782,6 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc)
WRITE(*,'(A,F10.5,A,I4.2,A,I2.2)')' total Write Time is ',write_run_tim &
,' at Fcst ',NF_HOURS,':',NF_MINUTES
ENDIF
-!
- IF(RC /= ESMF_SUCCESS) THEN
- WRITE(0,*)"FAIL: WRITE_RUN"
-! ELSE
-! WRITE(0,*)"PASS: WRITE_RUN"
- ENDIF
!
!-----------------------------------------------------------------------
!
@@ -1840,21 +1824,14 @@ subroutine wrt_finalize(wrt_comp, imp_state_write, exp_state_write, clock, rc)
!-----------------------------------------------------------------------
!
call ESMF_GridCompGetInternalState(wrt_comp, wrap, rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
deallocate(wrap%write_int_state,stat=stat)
-!
if (ESMF_LogFoundDeallocError(statusToCheck=stat, &
msg="Deallocation of internal state memory failed.", &
line=__LINE__, file=__FILE__)) return
!
!-----------------------------------------------------------------------
-!
- IF(RC /= ESMF_SUCCESS)THEN
- WRITE(0,*)'FAIL: Write_Finalize.'
-! ELSE
-! WRITE(0,*)'PASS: Write_Finalize.'
- ENDIF
-!
-!-----------------------------------------------------------------------
!
end subroutine wrt_finalize
!
@@ -1865,8 +1842,12 @@ subroutine recover_fields(file_bundle,rc)
type(ESMF_FieldBundle), intent(in) :: file_bundle
integer, intent(out), optional :: rc
!
+ real, parameter :: rdgas = 287.04, grav = 9.80
+ real, parameter :: stndrd_atmos_ps = 101325.
+ real, parameter :: stndrd_atmos_lapse = 0.0065
+
integer i,j,k,ifld,fieldCount,nstt,nend,fieldDimCount,gridDimCount
- integer istart,iend,jstart,jend,kstart,kend,km
+ integer istart,iend,jstart,jend,kstart,kend
logical uPresent, vPresent
type(ESMF_Grid) fieldGrid
type(ESMF_Field) ufield, vfield
@@ -1880,68 +1861,63 @@ subroutine recover_fields(file_bundle,rc)
real(ESMF_KIND_R4), dimension(:,:,:), pointer :: uwind3dr4,vwind3dr4
real(ESMF_KIND_R4), dimension(:,:,:), pointer :: cart3dPtr2dr4
real(ESMF_KIND_R4), dimension(:,:,:,:), pointer :: cart3dPtr3dr4
- real(ESMF_KIND_R8), dimension(:,:,:,:), pointer :: cart3dPtr3dr8
- save lonloc, latloc
real(ESMF_KIND_R8) :: coslon, sinlon, sinlat
!
! get field count
call ESMF_FieldBundleGet(file_bundle, fieldCount=fieldCount, &
grid=fieldGrid, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
- CALL ESMF_LogWrite("call recover field on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("call recover field on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
call ESMF_GridGet(fieldgrid, dimCount=gridDimCount, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if( first_getlatlon ) then
- CALL ESMF_LogWrite("call recover field get coord 1",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("call recover field get coord 1",ESMF_LOGMSG_INFO,rc=RC)
- call ESMF_GridGetCoord(fieldgrid, coordDim=1, farrayPtr=lon, rc=rc)
+ call ESMF_GridGetCoord(fieldgrid, coordDim=1, farrayPtr=lon, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- allocate(lonloc(lbound(lon,1):ubound(lon,1),lbound(lon,2):ubound(lon,2)))
- istart = lbound(lon,1)
- iend = ubound(lon,1)
- jstart = lbound(lon,2)
- jend = ubound(lon,2)
+ allocate(lonloc(lbound(lon,1):ubound(lon,1),lbound(lon,2):ubound(lon,2)))
+ istart = lbound(lon,1)
+ iend = ubound(lon,1)
+ jstart = lbound(lon,2)
+ jend = ubound(lon,2)
!$omp parallel do default(none) shared(lon,lonloc,jstart,jend,istart,iend) &
!$omp private(i,j)
- do j=jstart,jend
- do i=istart,iend
- lonloc(i,j) = lon(i,j) * pi/180.
- enddo
- enddo
+ do j=jstart,jend
+ do i=istart,iend
+ lonloc(i,j) = lon(i,j) * pi/180.
+ enddo
+ enddo
- CALL ESMF_LogWrite("call recover field get coord 2",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("call recover field get coord 2",ESMF_LOGMSG_INFO,rc=RC)
- call ESMF_GridGetCoord(fieldgrid, coordDim=2, farrayPtr=lat, rc=rc)
+ call ESMF_GridGetCoord(fieldgrid, coordDim=2, farrayPtr=lat, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- allocate(latloc(lbound(lat,1):ubound(lat,1),lbound(lat,2):ubound(lat,2)))
- istart = lbound(lat,1)
- iend = ubound(lat,1)
- jstart = lbound(lat,2)
- jend = ubound(lat,2)
+ allocate(latloc(lbound(lat,1):ubound(lat,1),lbound(lat,2):ubound(lat,2)))
+ istart = lbound(lat,1)
+ iend = ubound(lat,1)
+ jstart = lbound(lat,2)
+ jend = ubound(lat,2)
!$omp parallel do default(none) shared(lat,latloc,jstart,jend,istart,iend) &
!$omp private(i,j)
- do j=jstart,jend
- do i=istart,iend
- latloc(i,j) = lat(i,j) * pi/180.d0
- enddo
- enddo
- first_getlatlon = .false.
- endif
+ do j=jstart,jend
+ do i=istart,iend
+ latloc(i,j) = lat(i,j) * pi/180.d0
+ enddo
+ enddo
!
allocate(fcstField(fieldCount))
- CALL ESMF_LogWrite("call recover field get fcstField",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("call recover field get fcstField",ESMF_LOGMSG_INFO,rc=RC)
call ESMF_FieldBundleGet(file_bundle, fieldList=fcstField, itemorderflag=ESMF_ITEMORDER_ADDORDER, rc=rc)
!
do ifld=1,fieldCount
- CALL ESMF_LogWrite("call recover field get fieldname, type dimcount",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("call recover field get fieldname, type dimcount",ESMF_LOGMSG_INFO,rc=RC)
call ESMF_FieldGet(fcstField(ifld),name=fieldName,typekind=typekind,dimCount=fieldDimCount, rc=rc)
! convert back wind
@@ -1957,7 +1933,7 @@ subroutine recover_fields(file_bundle,rc)
endif
! print *,'in get 3D vector wind, uwindname=',trim(uwindname),' v=', trim(vwindname),' fieldname=',trim(fieldname)
! get u , v wind
- CALL ESMF_LogWrite("call recover field get u, v field",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("call recover field get u, v field",ESMF_LOGMSG_INFO,rc=RC)
call ESMF_FieldBundleGet(file_bundle,trim(uwindname),field=ufield,isPresent=uPresent,rc=rc)
call ESMF_FieldBundleGet(file_bundle,trim(vwindname),field=vfield,isPresent=vPresent,rc=rc)
if(.not. uPresent .or. .not.vPresent) then
@@ -1969,7 +1945,7 @@ subroutine recover_fields(file_bundle,rc)
! get field data
if ( typekind == ESMF_TYPEKIND_R4 ) then
if( fieldDimCount > gridDimCount+1 ) then
- CALL ESMF_LogWrite("call recover field get 3d card wind farray",ESMF_LOGMSG_INFO,rc=RC)
+ call ESMF_LogWrite("call recover field get 3d card wind farray",ESMF_LOGMSG_INFO,rc=RC)
call ESMF_FieldGet(fcstField(ifld), localDe=0, farrayPtr=cart3dPtr3dr4, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
if( ubound(cart3dPtr3dr4,1)-lbound(cart3dPtr3dr4,1)+1/=3) then
@@ -2006,11 +1982,11 @@ subroutine recover_fields(file_bundle,rc)
enddo
else
call ESMF_FieldGet(fcstField(ifld), localDe=0, farrayPtr=cart3dPtr2dr4, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
if( ubound(cart3dPtr2dr4,1)-lbound(cart3dPtr2dr4,1)+1 /= 3) then
rc=991
- print *,'ERROR, 2D the vector dimension /= 3, rc=',rc
- exit
+ write(0,*) 'ERROR: 2D vector component dimension /= 3, rc=',rc
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
endif
istart = lbound(cart3dPtr2dr4,2)
iend = ubound(cart3dPtr2dr4,2)
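! Illustrative sketch (standard rotation formula, not copied from this routine;
! values are hypothetical): downstream of the checks above, recover_fields
! rotates the 3-component Cartesian wind carried in the bundle back to
! zonal/meridional components using the lonloc/latloc arrays (radians).
program cart_to_uv
  implicit none
  real(8), parameter :: pi = 3.14159265358979d0
  real(8) :: lon, lat, coslon, sinlon, sinlat, coslat
  real(8) :: vx, vy, vz, u, v
  lon = 120.d0*pi/180.d0                 ! hypothetical grid-point longitude (rad)
  lat =  35.d0*pi/180.d0                 ! hypothetical grid-point latitude (rad)
  vx = 1.d0; vy = 0.5d0; vz = -0.2d0     ! Cartesian components as stored in the bundle
  coslon = cos(lon); sinlon = sin(lon)
  sinlat = sin(lat); coslat = cos(lat)
  u = -vx*sinlon + vy*coslon                        ! zonal wind
  v = -(vx*coslon + vy*sinlon)*sinlat + vz*coslat   ! meridional wind
  print '(A,2F12.6)', 'recovered (u,v) = ', u, v
end program cart_to_uv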
@@ -2067,8 +2043,8 @@ subroutine mask_fields(file_bundle,rc)
type(ESMF_FieldBundle), intent(in) :: file_bundle
integer, intent(out), optional :: rc
!
- integer i,j,k,ifld,fieldCount,nstt,nend,fieldDimCount,gridDimCount
- integer istart,iend,jstart,jend,kstart,kend,km
+ integer i,j,k,ifld,fieldCount,fieldDimCount,gridDimCount
+ integer istart,iend,jstart,jend,kstart,kend
type(ESMF_Grid) fieldGrid
type(ESMF_TypeKind_Flag) typekind
type(ESMF_TypeKind_Flag) attTypeKind
@@ -2085,8 +2061,6 @@ subroutine mask_fields(file_bundle,rc)
real(ESMF_KIND_R8) :: missing_value_r8=9.99e20
character(len=ESMF_MAXSTR) :: msg
- save maskwrt
-
call ESMF_LogWrite("call mask field on wrt comp",ESMF_LOGMSG_INFO,rc=RC)
! get fieldCount
@@ -2104,8 +2078,6 @@ subroutine mask_fields(file_bundle,rc)
call ESMF_FieldBundleGet(file_bundle, fieldList=fcstField, itemorderflag=ESMF_ITEMORDER_ADDORDER, rc=rc)
! generate the maskwrt according to surface pressure
- if( first_getmaskwrt ) then
-
do ifld=1,fieldCount
!call ESMF_LogWrite("call mask field get fieldname, type dimcount",ESMF_LOGMSG_INFO,rc=RC)
call ESMF_FieldGet(fcstField(ifld),name=fieldName,typekind=typekind,dimCount=fieldDimCount, rc=rc)
@@ -2142,9 +2114,6 @@ subroutine mask_fields(file_bundle,rc)
exit
endif
enddo
- first_getmaskwrt = .false.
-
- endif !first_getmaskwrt
! loop to mask all fields according to maskwrt
do ifld=1,fieldCount
@@ -2164,8 +2133,8 @@ subroutine mask_fields(file_bundle,rc)
line=__LINE__, file=__FILE__)) return ! bail out
if( ubound(vect4dPtr3dr4,1)-lbound(vect4dPtr3dr4,1)+1/=3 ) then
rc=991
- print *,'ERROR, 3D the vector dimension /= 3, rc=',rc
- exit
+ write(0,*) 'ERROR: 3D vector component dimension /= 3, rc=',rc
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
endif
! Get the _FillValue from the field attribute if exists
call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", &
@@ -2207,8 +2176,8 @@ subroutine mask_fields(file_bundle,rc)
line=__LINE__, file=__FILE__)) return ! bail out
if( ubound(vect3dPtr2dr4,1)-lbound(vect3dPtr2dr4,1)+1 /= 3 ) then
rc=991
- print *,'ERROR, 2D the vector dimension /= 3, rc=',rc
- exit
+ write(0,*) 'ERROR: 2D vector component dimension /= 3, rc=',rc
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
endif
! Get the _FillValue from the field attribute if exists
call ESMF_AttributeGet(fcstField(ifld), convention="NetCDF", purpose="FV3", &
@@ -2318,6 +2287,7 @@ subroutine mask_fields(file_bundle,rc)
endif
enddo
!
+ deallocate(maskwrt)
deallocate(fcstField)
rc = 0
@@ -3346,12 +3316,12 @@ subroutine splat4(idrt,jmax,aslat)
121.737742088d0, 124.879308913d0, 128.020877005d0, 131.162446275d0, &
134.304016638d0, 137.445588020d0, 140.587160352d0, 143.728733573d0, &
146.870307625d0, 150.011882457d0, 153.153458019d0, 156.295034268d0 /
- real(8) :: dlt,d1=1.d0
- integer :: jhe,jho,j0=0
+ real(8) :: dlt
+ integer :: jhe,jho
! real(8),parameter :: PI=3.14159265358979d0,C=(1.d0-(2.d0/PI)**2)*0.25d0
real(8),parameter :: C=(1.d0-(2.d0/PI)**2)*0.25d0
real(8) r
- integer jh,js,n,j
+ integer jh,n,j
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
! GAUSSIAN LATITUDES
IF(IDRT.EQ.4) THEN
@@ -3456,12 +3426,12 @@ subroutine splat8(idrt,jmax,aslat)
121.737742088d0, 124.879308913d0, 128.020877005d0, 131.162446275d0, &
134.304016638d0, 137.445588020d0, 140.587160352d0, 143.728733573d0, &
146.870307625d0, 150.011882457d0, 153.153458019d0, 156.295034268d0 /
- real(8) :: dlt,d1=1.d0
- integer(4) :: jhe,jho,j0=0
+ real(8) :: dlt
+ integer(4) :: jhe,jho
! real(8),parameter :: PI=3.14159265358979d0,C=(1.d0-(2.d0/PI)**2)*0.25d0
real(8),parameter :: C=(1.d0-(2.d0/PI)**2)*0.25d0
real(8) r
- integer jh,js,n,j
+ integer jh,n,j
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
! GAUSSIAN LATITUDES
IF(IDRT.EQ.4) THEN
@@ -3611,7 +3581,7 @@ subroutine lambert(stlat1,stlat2,c_lat,c_lon,glon,glat,x,y,inv)
! inv == 1 (glon,glat) ---> (x,y) lat/lon to grid
! inv == -1 (x,y) ---> (glon,glat) grid to lat/lon
- real(ESMF_KIND_R8) :: en,f,rho,rho0, dlon, theta, xp, yp
+ real(ESMF_KIND_R8) :: en,f,rho,rho0, dlon, theta
IF (stlat1 == stlat2) THEN
en=sin(stlat1*dtor)
@@ -3653,7 +3623,7 @@ subroutine get_outfile(nfl, filename, outfile_name,noutfile)
character(*), intent(inout) :: outfile_name(:)
integer, intent(inout) :: noutfile
- integer :: i,j,n,idx
+ integer :: i,j,n
logical :: found
!
noutfile = 0
diff --git a/io/post_gfs.F90 b/io/post_gfs.F90
index 08079d9c9..c0adaa0a5 100644
--- a/io/post_gfs.F90
+++ b/io/post_gfs.F90
@@ -368,7 +368,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
avgetrans, avgesnow, avgprec_cont, avgcprate_cont,&
avisbeamswin, avisdiffswin, airbeamswin, airdiffswin, &
alwoutc, alwtoac, aswoutc, aswtoac, alwinc, aswinc,&
- avgpotevp, snoavg, ti, si, cuppt
+ avgpotevp, snoavg, ti, si, cuppt, fdnsst
use soil, only: sldpth, sh2o, smc, stc
use masks, only: lmv, lmh, htm, vtm, gdlat, gdlon, dx, dy, hbm2, sm, sice
use ctlblk_mod, only: im, jm, lm, lp1, jsta, jend, jsta_2l, jend_2u, jsta_m,jend_m, &
@@ -505,13 +505,14 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
! GFS does not have surface specific humidity
! inst sensible heat flux
! inst latent heat flux
-!$omp parallel do default(none),private(i,j),shared(jsta,jend,im,spval,qs,twbs,qwbs,ths)
+!$omp parallel do default(none),private(i,j),shared(jsta,jend,im,spval,qs,twbs,qwbs,ths,fdnsst)
do j=jsta,jend
do i=1,im
qs(i,j) = SPVAL
twbs(i,j) = SPVAL
qwbs(i,j) = SPVAL
ths(i,j) = SPVAL
+ fdnsst(i,j) = SPVAL
enddo
enddo
@@ -917,6 +918,18 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
enddo
endif
+ ! foundation temperature
+ if(trim(fieldname)=='tref') then
+ !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,fdnsst)
+ do j=jsta,jend
+ do i=ista, iend
+ if (arrayr42d(i,j) /= spval) then
+ fdnsst(i,j) = arrayr42d(i,j)
+ endif
+ enddo
+ enddo
+ endif
+
! convective precip in m per physics time step
if(trim(fieldname)=='cpratb_ave') then
!$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,dtq2,arrayr42d,avgcprate)
diff --git a/io/post_regional.F90 b/io/post_regional.F90
index 44ea99b2e..a42e10475 100644
--- a/io/post_regional.F90
+++ b/io/post_regional.F90
@@ -220,7 +220,7 @@ end subroutine post_run_regional
!
!-----------------------------------------------------------------------
!
- subroutine post_getattr_regional(wrt_int_state)
+ subroutine post_getattr_regional(wrt_int_state,grid_id)
!
use esmf
use ctlblk_mod, only: im, jm, mpi_comm_comp,gdsdegr,spval
@@ -236,6 +236,7 @@ subroutine post_getattr_regional(wrt_int_state)
implicit none
!
type(wrt_internal_state),intent(inout) :: wrt_int_state
+ integer, intent(in) :: grid_id
!
! local variable
integer i,j,k,n,kz, attcount, nfb
@@ -254,92 +255,92 @@ subroutine post_getattr_regional(wrt_int_state)
fldbundle = wrt_int_state%wrtFB(nfb)
! set grid spec:
-! if(mype==0) print*,'in post_getattr_lam,output_grid=',trim(output_grid),'nfb=',nfb
+! if(mype==0) print*,'in post_getattr_lam,output_grid=',trim(output_grid(grid_id)),'nfb=',nfb
! if(mype==0) print*,'in post_getattr_lam, lon1=',lon1,lon2,lat1,lat2,dlon,dlat
gdsdegr = 1000000.
- if(trim(output_grid) == 'regional_latlon') then
+ if(trim(output_grid(grid_id)) == 'regional_latlon') then
MAPTYPE=0
gridtype='A'
- if( lon1<0 ) then
- lonstart = nint((lon1+360.)*gdsdegr)
+ if( lon1(grid_id)<0 ) then
+ lonstart = nint((lon1(grid_id)+360.)*gdsdegr)
else
- lonstart = nint(lon1*gdsdegr)
+ lonstart = nint(lon1(grid_id)*gdsdegr)
endif
- if( lon2<0 ) then
- lonlast = nint((lon2+360.)*gdsdegr)
+ if( lon2(grid_id)<0 ) then
+ lonlast = nint((lon2(grid_id)+360.)*gdsdegr)
else
- lonlast = nint(lon2*gdsdegr)
+ lonlast = nint(lon2(grid_id)*gdsdegr)
endif
- latstart = nint(lat1*gdsdegr)
- latlast = nint(lat2*gdsdegr)
+ latstart = nint(lat1(grid_id)*gdsdegr)
+ latlast = nint(lat2(grid_id)*gdsdegr)
- dxval = dlon*gdsdegr
- dyval = dlat*gdsdegr
+ dxval = dlon(grid_id)*gdsdegr
+ dyval = dlat(grid_id)*gdsdegr
! if(mype==0) print*,'lonstart,latstart,dyval,dxval', &
! lonstart,lonlast,latstart,latlast,dyval,dxval
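! Illustrative sketch (hypothetical corner longitude): the branches above map a
! negative lon1/lon2 into [0,360) before scaling by gdsdegr; a compact
! equivalent using modulo:
program normalize_lon
  implicit none
  real    :: lon1, gdsdegr
  integer :: lonstart
  gdsdegr = 1000000.
  lon1    = -97.5                                ! hypothetical western corner
  lonstart = nint(modulo(lon1, 360.)*gdsdegr)    ! modulo(-97.5,360.) = 262.5
  print '(A,I0)', 'lonstart = ', lonstart        ! prints 262500000
end program normalize_lon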
- else if(trim(output_grid) == 'lambert_conformal') then
+ else if(trim(output_grid(grid_id)) == 'lambert_conformal') then
MAPTYPE=1
GRIDTYPE='A'
- if( cen_lon<0 ) then
- cenlon = nint((cen_lon+360.)*gdsdegr)
+ if( cen_lon(grid_id)<0 ) then
+ cenlon = nint((cen_lon(grid_id)+360.)*gdsdegr)
else
- cenlon = nint(cen_lon*gdsdegr)
+ cenlon = nint(cen_lon(grid_id)*gdsdegr)
endif
- cenlat = cen_lat*gdsdegr
- if( lon1<0 ) then
- lonstart = nint((lon1+360.)*gdsdegr)
+ cenlat = cen_lat(grid_id)*gdsdegr
+ if( lon1(grid_id)<0 ) then
+ lonstart = nint((lon1(grid_id)+360.)*gdsdegr)
else
- lonstart = nint(lon1*gdsdegr)
+ lonstart = nint(lon1(grid_id)*gdsdegr)
endif
- latstart = nint(lat1*gdsdegr)
+ latstart = nint(lat1(grid_id)*gdsdegr)
- truelat1 = nint(stdlat1*gdsdegr)
- truelat2 = nint(stdlat2*gdsdegr)
+ truelat1 = nint(stdlat1(grid_id)*gdsdegr)
+ truelat2 = nint(stdlat2(grid_id)*gdsdegr)
- if(dxin null()
+ logical :: top_parent_is_global
integer :: num_restart_interval, restart_starttime
real,dimension(:),allocatable :: restart_interval
+
+ integer :: urc
+ type(ESMF_State) :: tempState
+ type(ESMF_Info) :: info
+
+ type(time_type) :: Time_init, Time, Time_step, Time_end, &
+ Time_restart, Time_step_restart
+ type(time_type) :: iautime
+ integer :: io_unit, calendar_type_res, date_res(6), date_init_res(6)
+
!
!-----------------------------------------------------------------------
!***********************************************************************
@@ -220,71 +385,56 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
timeis = mpi_wtime()
rc = ESMF_SUCCESS
!
-!-----------------------------------------------------------------------
-!*** ALLOCATE THE WRITE COMPONENT'S INTERNAL STATE.
-!-----------------------------------------------------------------------
-!
- allocate(atm_int_state,stat=rc)
-!
-!-----------------------------------------------------------------------
-!*** ATTACH THE INTERNAL STATE TO THE WRITE COMPONENT.
-!-----------------------------------------------------------------------
-!
- wrap%ptr => atm_int_state
- call ESMF_GridCompSetInternalState(fcst_comp, wrap, rc)
+ call ESMF_VMGetCurrent(vm=vm,rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!
- call ESMF_VMGetCurrent(vm=VM,rc=RC)
- call ESMF_VMGet(vm=VM, localPet=mype, mpiCommunicator=fcst_mpi_comm, &
+
+ call ESMF_VMGet(vm=vm, localPet=mype, mpiCommunicator=fcst_mpi_comm, &
petCount=fcst_ntasks, rc=rc)
- if (mype == 0) write(0,*)'in fcst comp init, fcst_ntasks=',fcst_ntasks
-!
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (mype == 0) write(*,*)'in fcst comp init, fcst_ntasks=',fcst_ntasks
+
CF = ESMF_ConfigCreate(rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
call ESMF_ConfigLoadFile(config=CF ,filename='model_configure' ,rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!
+
num_restart_interval = ESMF_ConfigGetLen(config=CF, label ='restart_interval:',rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if(mype == 0) print *,'af nems config,num_restart_interval=',num_restart_interval
+ if (mype == 0) print *,'af nems config,num_restart_interval=',num_restart_interval
if (num_restart_interval<=0) num_restart_interval = 1
allocate(restart_interval(num_restart_interval))
restart_interval = 0
call ESMF_ConfigGetAttribute(CF,valueList=restart_interval,label='restart_interval:', &
- count=num_restart_interval, rc=rc)
+ count=num_restart_interval, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if(mype == 0) print *,'af nems config,restart_interval=',restart_interval
-
+ if (mype == 0) print *,'af nems config,restart_interval=',restart_interval
!
call fms_init(fcst_mpi_comm)
call mpp_init()
initClock = mpp_clock_id( 'Initialization' )
call mpp_clock_begin (initClock) !nesting problem
- call fms_init
call constants_init
call sat_vapor_pres_init
-!
- if ( force_date_from_configure ) then
-
- select case( uppercase(trim(calendar)) )
- case( 'JULIAN' )
- calendar_type = JULIAN
- case( 'GREGORIAN' )
- calendar_type = GREGORIAN
- case( 'NOLEAP' )
- calendar_type = NOLEAP
- case( 'THIRTY_DAY' )
- calendar_type = THIRTY_DAY_MONTHS
- case( 'NO_CALENDAR' )
- calendar_type = NO_CALENDAR
- case default
- call mpp_error ( FATAL, 'fcst_initialize: calendar must be one of '// &
- 'JULIAN|GREGORIAN|NOLEAP|THIRTY_DAY|NO_CALENDAR.' )
- end select
- endif
-!
- call set_calendar_type (calendar_type )
+ select case( uppercase(trim(calendar)) )
+ case( 'JULIAN' )
+ calendar_type = JULIAN
+ case( 'GREGORIAN' )
+ calendar_type = GREGORIAN
+ case( 'NOLEAP' )
+ calendar_type = NOLEAP
+ case( 'THIRTY_DAY' )
+ calendar_type = THIRTY_DAY_MONTHS
+ case( 'NO_CALENDAR' )
+ calendar_type = NO_CALENDAR
+ case default
+ call mpp_error ( FATAL, 'fcst_initialize: calendar must be one of '// &
+ 'JULIAN|GREGORIAN|NOLEAP|THIRTY_DAY|NO_CALENDAR.' )
+ end select
+
+ call set_calendar_type (calendar_type)
!
!-----------------------------------------------------------------------
!*** set atmos time
@@ -293,61 +443,105 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
call ESMF_ClockGet(clock, CurrTime=CurrTime, StartTime=StartTime, &
StopTime=StopTime, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- RunDuration = StopTime - CurrTime
date_init = 0
call ESMF_TimeGet (StartTime, &
YY=date_init(1), MM=date_init(2), DD=date_init(3), &
- H=date_init(4), M =date_init(5), S =date_init(6), RC=rc)
+ H=date_init(4), M =date_init(5), S =date_init(6), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if ( date_init(1) == 0 ) date_init = date
- atm_int_state%Time_init = set_date (date_init(1), date_init(2), date_init(3), &
- date_init(4), date_init(5), date_init(6))
- if(mype==0) write(*,'(A,6I5)') 'StartTime=',date_init
+ Time_init = set_date (date_init(1), date_init(2), date_init(3), &
+ date_init(4), date_init(5), date_init(6))
+ if (mype == 0) write(*,'(A,6I5)') 'StartTime=',date_init
date=0
call ESMF_TimeGet (CurrTime, &
YY=date(1), MM=date(2), DD=date(3), &
- H=date(4), M =date(5), S =date(6), RC=rc )
+ H=date(4), M =date(5), S =date(6), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if(mype==0) write(*,'(A,6I5)') 'CurrTime =',date
-
- atm_int_state%Time_atmos = set_date (date(1), date(2), date(3), &
- date(4), date(5), date(6))
+ Time = set_date (date(1), date(2), date(3), &
+ date(4), date(5), date(6))
+ if (mype == 0) write(*,'(A,6I5)') 'CurrTime =',date
date_end=0
call ESMF_TimeGet (StopTime, &
YY=date_end(1), MM=date_end(2), DD=date_end(3), &
- H=date_end(4), M =date_end(5), S =date_end(6), RC=rc )
+ H=date_end(4), M =date_end(5), S =date_end(6), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- if ( date_end(1) == 0 ) date_end = date
- atm_int_state%Time_end = set_date (date_end(1), date_end(2), date_end(3), &
- date_end(4), date_end(5), date_end(6))
- if(mype==0) write(*,'(A,6I5)') 'StopTime =',date_end
-!
- call diag_manager_set_time_end(atm_int_state%Time_end)
-!
- CALL ESMF_TimeIntervalGet(RunDuration, S=Run_length, RC=rc)
+ Time_end = set_date (date_end(1), date_end(2), date_end(3), &
+ date_end(4), date_end(5), date_end(6))
+ if (mype == 0) write(*,'(A,6I5)') 'StopTime =',date_end
+
+!------------------------------------------------------------------------
+! If this is a restarted run ('INPUT/coupler.res' file exists),
+! compare date and date_init to the values in 'coupler.res'
+
+ if (mype == 0) then
+ inquire(FILE='INPUT/coupler.res', EXIST=fexist)
+ if (fexist) then ! file exists, this is a restart run
+
+ call ESMF_UtilIOUnitGet(unit=io_unit, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ open(unit=io_unit, file='INPUT/coupler.res', status='old', action='read', err=998)
+ read (io_unit,*,err=999) calendar_type_res
+ read (io_unit,*) date_init_res
+ read (io_unit,*) date_res
+ close(io_unit)
+
+ if(date_res(1) == 0 .and. date_init_res(1) /= 0) date_res = date_init_res
+
+ if(mype == 0) write(*,'(A,6(I4))') 'INPUT/coupler.res: date_init=',date_init_res
+ if(mype == 0) write(*,'(A,6(I4))') 'INPUT/coupler.res: date =',date_res
+
+ if (calendar_type /= calendar_type_res) then
+ write(0,'(A)') 'fcst_initialize ERROR: calendar_type /= calendar_type_res'
+ write(0,'(A,6(I4))')' calendar_type = ', calendar_type
+ write(0,'(A,6(I4))')' calendar_type_res = ', calendar_type_res
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ endif
+
+ if (.not. ALL(date_init.EQ.date_init_res)) then
+ write(0,'(A)') 'fcst_initialize ERROR: date_init /= date_init_res'
+ write(0,'(A,6(I4))')' date_init = ', date_init
+ write(0,'(A,6(I4))')' date_init_res = ', date_init_res
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ endif
+
+ if (.not. ALL(date.EQ.date_res)) then
+ write(0,'(A)') 'fcst_initialize ERROR: date /= date_res'
+ write(0,'(A,6(I4))')' date = ', date
+ write(0,'(A,6(I4))')' date_res = ', date_res
+ call ESMF_Finalize(endflag=ESMF_END_ABORT)
+ endif
+
+ 999 continue
+ 998 continue
+
+ endif ! fexist
+ endif ! mype == 0
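! Illustrative sketch (assumes the three-record coupler.res layout read above:
! calendar type, initial date, current date; the clock values here are
! hypothetical): a standalone version of the restart consistency check.
program check_coupler_res
  implicit none
  integer :: io_unit, ios
  integer :: calendar_type_res, date_init_res(6), date_res(6)
  integer :: date_init(6), date(6)
  date_init = (/2021, 3, 22, 6, 0, 0/)   ! hypothetical model clock start time
  date      = (/2021, 3, 22, 12, 0, 0/)  ! hypothetical model clock current time
  open(newunit=io_unit, file='INPUT/coupler.res', status='old', action='read', iostat=ios)
  if (ios /= 0) stop 'no INPUT/coupler.res, treat the run as a cold start'
  read (io_unit,*) calendar_type_res
  read (io_unit,*) date_init_res
  read (io_unit,*) date_res
  close(io_unit)
  if (.not. all(date_init == date_init_res)) stop 'ERROR: date_init /= date_init_res'
  if (.not. all(date      == date_res))      stop 'ERROR: date /= date_res'
  print '(A)', 'INPUT/coupler.res is consistent with the model clock'
end program check_coupler_res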
+
+ RunDuration = StopTime - CurrTime
+
+ CALL ESMF_TimeIntervalGet(RunDuration, S=Run_length, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
call diag_manager_init (TIME_INIT=date)
- call diag_manager_set_time_end(atm_int_state%Time_end)
+ call diag_manager_set_time_end(Time_end)
!
- atm_int_state%Time_step_atmos = set_time (dt_atmos,0)
- atm_int_state%num_atmos_calls = Run_length / dt_atmos
- atm_int_state%Time_atstart = atm_int_state%Time_atmos
- if (mype == 0) write(0,*)'num_atmos_calls=',atm_int_state%num_atmos_calls,'time_init=', &
- date_init,'time_atmos=',date,'time_end=',date_end,'dt_atmos=',dt_atmos, &
+ Time_step = set_time (dt_atmos,0)
+ num_atmos_calls = Run_length / dt_atmos
+ if (mype == 0) write(*,*)'num_atmos_calls=',num_atmos_calls,'time_init=', &
+ date_init,'time=',date,'time_end=',date_end,'dt_atmos=',dt_atmos, &
'Run_length=',Run_length
! set up forecast time array that controls when to write out restart files
frestart = 0
- call get_time(atm_int_state%Time_end - atm_int_state%Time_init,total_inttime)
+ call get_time(Time_end - Time_init, total_inttime)
! set iau offset time
- atm_int_state%Atm%iau_offset = iau_offset
+ Atmos%iau_offset = iau_offset
if(iau_offset > 0 ) then
iautime = set_time(iau_offset * 3600, 0)
endif
@@ -359,19 +553,19 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
if(freq_restart) then
if(restart_interval(1) >= 0) then
tmpvar = restart_interval(1) * 3600
- atm_int_state%Time_step_restart = set_time (tmpvar, 0)
+ Time_step_restart = set_time (tmpvar, 0)
if(iau_offset > 0 ) then
- atm_int_state%Time_restart = atm_int_state%Time_init + iautime + atm_int_state%Time_step_restart
+ Time_restart = Time_init + iautime + Time_step_restart
frestart(1) = tmpvar + iau_offset *3600
else
- atm_int_state%Time_restart = atm_int_state%Time_init + atm_int_state%Time_step_restart
+ Time_restart = Time_init + Time_step_restart
frestart(1) = tmpvar
endif
if(restart_interval(1) > 0) then
i = 2
- do while ( atm_int_state%Time_restart < atm_int_state%Time_end )
+ do while ( Time_restart < Time_end )
frestart(i) = frestart(i-1) + tmpvar
- atm_int_state%Time_restart = atm_int_state%Time_restart + atm_int_state%Time_step_restart
+ Time_restart = Time_restart + Time_step_restart
i = i + 1
enddo
endif
@@ -396,9 +590,9 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
if ( ANY(frestart(:) == total_inttime) ) restart_endfcst = .true.
if (mype == 0) print *,'frestart=',frestart(1:10)/3600, 'restart_endfcst=',restart_endfcst, &
'total_inttime=',total_inttime
-! if there is restart writing during integration
- atm_int_state%intrm_rst = 0
- if (frestart(1)>0) atm_int_state%intrm_rst = 1
+! if there is restart writing during integration
+ intrm_rst = 0
+ if (frestart(1)>0) intrm_rst = 1
!
!----- write time stamps (for start time and end time) ------
@@ -412,14 +606,11 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
!
!------ initialize component models ------
- call atmos_model_init (atm_int_state%Atm, atm_int_state%Time_init, &
- atm_int_state%Time_atmos, atm_int_state%Time_step_atmos)
+ call atmos_model_init (Atmos, Time_init, Time, Time_step)
!
inquire(FILE='data_table', EXIST=fexist)
if (fexist) then
- call data_override_init ( ) ! Atm_domain_in = Atm%domain, &
- ! Ice_domain_in = Ice%domain, &
- ! Land_domain_in = Land%domain )
+ call data_override_init()
endif
!-----------------------------------------------------------------------
!---- open and close dummy file in restart dir to check if dir exists --
@@ -429,10 +620,8 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
call mpp_close(unit, MPP_DELETE)
endif
!
-!
!-----------------------------------------------------------------------
-!*** create grid for output fields
-!*** first try: Create cubed sphere grid from file
+!*** create grid for output fields, using FV3 parameters
!-----------------------------------------------------------------------
!
call mpp_error(NOTE, 'before create fcst grid')
@@ -443,157 +632,102 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
call read_data("INPUT/grid_spec.nc", "atm_mosaic_file", gridfile)
endif
- if (mpp_pe() == mpp_root_pe()) &
- write(*, *) 'create fcst grid: mype,regional,nested=',mype,atm_int_state%Atm%regional,atm_int_state%Atm%nested
-
- ! regional-only without nests
- if( atm_int_state%Atm%regional .and. .not. atm_int_state%Atm%nested ) then
-
- call atmosphere_control_data (isc, iec, jsc, jec, nlev)
-
- domain = atm_int_state%Atm%domain
- fcstNpes = atm_int_state%Atm%layout(1)*atm_int_state%Atm%layout(2)
- allocate(isl(fcstNpes), iel(fcstNpes), jsl(fcstNpes), jel(fcstNpes))
- allocate(deBlockList(2,2,fcstNpes))
- call mpp_get_compute_domains(domain,xbegin=isl,xend=iel,ybegin=jsl,yend=jel)
- do n=1,fcstNpes
- deBlockList(:,1,n) = (/ isl(n),iel(n) /)
- deBlockList(:,2,n) = (/ jsl(n),jel(n) /)
- end do
- delayout = ESMF_DELayoutCreate(petMap=(/(i,i=0,fcstNpes-1)/), rc=rc); ESMF_ERR_ABORT(rc)
- distgrid = ESMF_DistGridCreate(minIndex=(/1,1/), &
- maxIndex=(/atm_int_state%Atm%mlon,atm_int_state%Atm%mlat/), &
- delayout=delayout, &
- deBlockList=deBlockList, rc=rc); ESMF_ERR_ABORT(rc)
-
- fcstGrid = ESMF_GridCreateNoPeriDim(regDecomp=(/atm_int_state%Atm%layout(1),atm_int_state%Atm%layout(2)/), &
- minIndex=(/1,1/), &
- maxIndex=(/atm_int_state%Atm%mlon,atm_int_state%Atm%mlat/), &
- gridAlign=(/-1,-1/), &
- decompflag=(/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/), &
- name="fcst_grid", &
- indexflag=ESMF_INDEX_DELOCAL, &
- rc=rc); ESMF_ERR_ABORT(rc)
-
- ! add and define "center" coordinate values
- call ESMF_GridAddCoord(fcstGrid, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc); ESMF_ERR_ABORT(rc)
- call ESMF_GridGetCoord(fcstGrid, coordDim=1, staggerLoc=ESMF_STAGGERLOC_CENTER, &
- farrayPtr=glonPtr, rc=rc); ESMF_ERR_ABORT(rc)
- call ESMF_GridGetCoord(fcstGrid, coordDim=2, staggerLoc=ESMF_STAGGERLOC_CENTER, &
- farrayPtr=glatPtr, rc=rc); ESMF_ERR_ABORT(rc)
+ ngrids = Atmos%ngrids
+ mygrid = Atmos%mygrid
+ allocate(grid_number_on_all_pets(fcst_ntasks))
+ call mpi_allgather(mygrid, 1, MPI_INTEGER, &
+ grid_number_on_all_pets, 1, MPI_INTEGER, &
+ fcst_mpi_comm, rc)
+
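! Illustrative sketch (assumes an MPI build with the mpi module; the grid
! assignment is hypothetical): each PET contributes its own grid number and
! every PET receives the full list, mirroring the allgather above.
program gather_grid_numbers
  use mpi
  implicit none
  integer :: ierr, myrank, ntasks, mygrid
  integer, allocatable :: grid_number_on_all_pets(:)
  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, myrank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, ntasks, ierr)
  mygrid = merge(2, 1, myrank >= ntasks/2)   ! hypothetical: last half of PETs run a nest
  allocate(grid_number_on_all_pets(ntasks))
  call MPI_Allgather(mygrid, 1, MPI_INTEGER, &
                     grid_number_on_all_pets, 1, MPI_INTEGER, MPI_COMM_WORLD, ierr)
  if (myrank == 0) print '(A,*(1X,I0))', 'grid number on each PET:', grid_number_on_all_pets
  call MPI_Finalize(ierr)
end program gather_grid_numbers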
+ allocate (fcstGrid(ngrids),fcstGridComp(ngrids))
+ do n=1,ngrids
+
+ pelist => null()
+ call atmos_model_get_nth_domain_info(n, layout, nx, ny, pelist)
+ call ESMF_VMBroadcast(vm, bcstData=layout, count=2, rootPet=pelist(1), rc=rc); ESMF_ERR_ABORT(rc)
+
+ if (n==1) then
+ ! on grid==1 (top level parent) determine if the domain is global or regional
+ top_parent_is_global = .true.
+ if(mygrid==1) then
+ if (Atmos%regional) top_parent_is_global = .false.
+ endif
+ call mpi_bcast(top_parent_is_global, 1, MPI_LOGICAL, 0, fcst_mpi_comm, rc)
+ endif
- do j = jsc, jec
- do i = isc, iec
- glonPtr(i-isc+1,j-jsc+1) = atm_int_state%Atm%lon(i-isc+1,j-jsc+1) * dtor
- glatPtr(i-isc+1,j-jsc+1) = atm_int_state%Atm%lat(i-isc+1,j-jsc+1) * dtor
- enddo
- enddo
+ if (n==1 .and. top_parent_is_global) then
- ! add and define "corner" coordinate values
- call ESMF_GridAddCoord(fcstGrid, staggerLoc=ESMF_STAGGERLOC_CORNER, &
- rc=rc); ESMF_ERR_ABORT(rc)
- call ESMF_GridGetCoord(fcstGrid, coordDim=1, staggerLoc=ESMF_STAGGERLOC_CORNER, &
- totalLBound=tlb, totalUBound=tub, &
- farrayPtr=glonPtr, rc=rc); ESMF_ERR_ABORT(rc)
- glonPtr(tlb(1):tub(1),tlb(2):tub(2)) = &
- atm_int_state%Atm%lon_bnd(tlb(1):tub(1),tlb(2):tub(2)) * dtor
- call ESMF_GridGetCoord(fcstGrid, coordDim=2, staggerLoc=ESMF_STAGGERLOC_CORNER, &
- totalLBound=tlb, totalUBound=tub, &
- farrayPtr=glatPtr, rc=rc); ESMF_ERR_ABORT(rc)
- glatPtr(tlb(1):tub(1),tlb(2):tub(2)) = &
- atm_int_state%Atm%lat_bnd(tlb(1):tub(1),tlb(2):tub(2)) * dtor
-
- call mpp_error(NOTE, 'after create fcst grid for regional-only')
-
- else ! not regional only
-
- if (.not. atm_int_state%Atm%regional .and. .not. atm_int_state%Atm%nested ) then !! global only
-
- do tl=1,6
- decomptile(1,tl) = atm_int_state%Atm%layout(1)
- decomptile(2,tl) = atm_int_state%Atm%layout(2)
- decompflagPTile(:,tl) = (/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/)
- enddo
- fcstGrid = ESMF_GridCreateMosaic(filename="INPUT/"//trim(gridfile), &
- regDecompPTile=decomptile,tileFilePath="INPUT/", &
- decompflagPTile=decompflagPTile, &
- staggerlocList=(/ESMF_STAGGERLOC_CENTER, ESMF_STAGGERLOC_CORNER/), &
- name='fcst_grid', rc=rc)
+ fcstGridComp(n) = ESMF_GridCompCreate(name="global", petList=pelist, rc=rc); ESMF_ERR_ABORT(rc)
+
+ call ESMF_InfoGetFromHost(fcstGridComp(n), info=info, rc=rc); ESMF_ERR_ABORT(rc)
+ call ESMF_InfoSet(info, key="layout", values=layout, rc=rc); ESMF_ERR_ABORT(rc)
+ call ESMF_InfoSet(info, key="tilesize", value=Atmos%mlon, rc=rc); ESMF_ERR_ABORT(rc)
+
+ call ESMF_GridCompSetServices(fcstGridComp(n), SetServicesNest, userrc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
+
+ else
- call mpp_error(NOTE, 'after create fcst grid for global-only with INPUT/'//trim(gridfile))
+ allocate(petListNest(layout(1)*layout(2)))
+ k=pelist(1)
+ do j=1,layout(2)
+ do i=1,layout(1)
+ petListNest(k-pelist(1)+1) = k
+ k = k + 1
+ end do
+ end do
- else !! global-nesting or regional-nesting
+ fcstGridComp(n) = ESMF_GridCompCreate(name="nest", petList=petListNest, rc=rc); ESMF_ERR_ABORT(rc)
- if (mype==0) TileLayout = atm_int_state%Atm%layout
- call ESMF_VMBroadcast(vm, bcstData=TileLayout, count=2, &
- rootPet=0, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_InfoGetFromHost(fcstGridComp(n), info=info, rc=rc); ESMF_ERR_ABORT(rc)
+ call ESMF_InfoSet(info, key="layout", values=layout, rc=rc); ESMF_ERR_ABORT(rc)
+ call ESMF_InfoSet(info, key="nx", value=nx, rc=rc); ESMF_ERR_ABORT(rc)
+ call ESMF_InfoSet(info, key="ny", value=ny, rc=rc); ESMF_ERR_ABORT(rc)
- if (mype==0) npes(1) = mpp_npes()
- call ESMF_VMBroadcast(vm, bcstData=npes, count=1, &
- rootPet=0, rc=rc)
+ call ESMF_GridCompSetServices(fcstGridComp(n), SetServicesNest, userrc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
- if ( npes(1) == TileLayout(1) * TileLayout(2) * 6 ) then
- ! global-nesting
- nestRootPet = npes(1)
- gridfile="grid.nest02.tile7.nc"
- else if ( npes(1) == TileLayout(1) * TileLayout(2) ) then
- ! regional-nesting
- nestRootPet = npes(1)
- gridfile="grid.nest02.tile2.nc"
- else
- call mpp_error(FATAL, 'Inconsistent nestRootPet and Atm%layout')
- endif
- if (mype == nestRootPet) then
- if (nestRootPet /= atm_int_state%Atm%pelist(1)) then
- write(0,*)'error in fcst_initialize: nestRootPet /= atm_int_state%Atm%pelist(1)'
- write(0,*)'error in fcst_initialize: nestRootPet = ',nestRootPet
- write(0,*)'error in fcst_initialize: atm_int_state%Atm%pelist(1) = ',atm_int_state%Atm%pelist(1)
- ESMF_ERR_ABORT(100)
- endif
- endif
+ deallocate(petListNest)
- ! nest rootPet shares peList with others
- if (mype == nestRootPet) peListSize(1) = size(atm_int_state%Atm%pelist)
- call ESMF_VMBroadcast(vm, bcstData=peListSize, count=1, rootPet=nestRootPet, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ end if
- ! nest rootPet shares layout with others
- if (mype == nestRootPet) regDecomp = atm_int_state%Atm%layout
- call ESMF_VMBroadcast(vm, bcstData=regDecomp, count=2, rootPet=nestRootPet, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_GridCompIsPetLocal(fcstGridComp(n), rc=rc)) then
+ call ESMF_GridCompGet(fcstGridComp(n), grid=fcstGrid(n), rc=rc); ESMF_ERR_ABORT(rc)
- ! prepare petMap variable
- allocate(petMap(peListSize(1)))
- if (mype == nestRootPet) petMap = atm_int_state%Atm%pelist
- ! do the actual broadcast of the petMap
- call ESMF_VMBroadcast(vm, bcstData=petMap, count=peListSize(1), rootPet=nestRootPet, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_GridAddCoord(fcstGrid(n), staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc); ESMF_ERR_ABORT(rc)
+ call ESMF_GridAddCoord(fcstGrid(n), staggerLoc=ESMF_STAGGERLOC_CORNER, rc=rc); ESMF_ERR_ABORT(rc)
- ! create the DELayout that maps DEs to the PETs in the petMap
- delayout = ESMF_DELayoutCreate(petMap=petMap, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ ! define "center" coordinate values
+ call ESMF_GridGetCoord(fcstGrid(n), coordDim=1, staggerLoc=ESMF_STAGGERLOC_CENTER, &
+ totalLBound=tlb, totalUBound=tub, &
+ farrayPtr=glonPtr, rc=rc); ESMF_ERR_ABORT(rc)
+ glonPtr(tlb(1):tub(1),tlb(2):tub(2)) = Atmos%lon(tlb(1):tub(1),tlb(2):tub(2))
- ! create the nest Grid by reading it from file but use DELayout
- fcstGrid = ESMF_GridCreate(filename="INPUT/"//trim(gridfile), &
- fileformat=ESMF_FILEFORMAT_GRIDSPEC, regDecomp=regDecomp, &
- decompflag=(/ESMF_DECOMP_SYMMEDGEMAX,ESMF_DECOMP_SYMMEDGEMAX/), &
- delayout=delayout, isSphere=.false., indexflag=ESMF_INDEX_DELOCAL, &
- rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ call ESMF_GridGetCoord(fcstGrid(n), coordDim=2, staggerLoc=ESMF_STAGGERLOC_CENTER, &
+ totalLBound=tlb, totalUBound=tub, &
+ farrayPtr=glatPtr, rc=rc); ESMF_ERR_ABORT(rc)
+ glatPtr(tlb(1):tub(1),tlb(2):tub(2)) = Atmos%lat(tlb(1):tub(1),tlb(2):tub(2))
- call mpp_error(NOTE, 'after create fcst grid with INPUT/'//trim(gridfile))
+ ! define "corner" coordinate values
+ call ESMF_GridGetCoord(fcstGrid(n), coordDim=1, staggerLoc=ESMF_STAGGERLOC_CORNER, &
+ totalLBound=tlb, totalUBound=tub, &
+ farrayPtr=glonPtr, rc=rc); ESMF_ERR_ABORT(rc)
+ glonPtr(tlb(1):tub(1),tlb(2):tub(2)) = Atmos%lon_bnd(tlb(1):tub(1),tlb(2):tub(2))
- endif
+ call ESMF_GridGetCoord(fcstGrid(n), coordDim=2, staggerLoc=ESMF_STAGGERLOC_CORNER, &
+ totalLBound=tlb, totalUBound=tub, &
+ farrayPtr=glatPtr, rc=rc); ESMF_ERR_ABORT(rc)
+ glatPtr(tlb(1):tub(1),tlb(2):tub(2)) = Atmos%lat_bnd(tlb(1):tub(1),tlb(2):tub(2))
+ end if ! IsPetLocal
- endif
+ end do
!
!! FIXME
- if ( .not. atm_int_state%Atm%nested ) then !! global only
- call addLsmask2grid(fcstGrid, rc=rc)
+ if ( .not. Atmos%nested ) then !! global only
+ call addLsmask2grid(fcstGrid(mygrid), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
! print *,'call addLsmask2grid after fcstGrid, rc=',rc
endif
@@ -607,7 +741,7 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
!
! Write grid to netcdf file
if( cplprint_flag ) then
- call wrt_fcst_grid(fcstGrid, "diagnostic_FV3_fcstGrid.nc", &
+ call wrt_fcst_grid(fcstGrid(mygrid), "diagnostic_FV3_fcstGrid.nc", &
regridArea=.TRUE., rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
endif
@@ -621,15 +755,39 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
name="gridfile", value=trim(gridfile), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+! Add total number of domains (grids) Attribute to the exportState
+ call ESMF_AttributeAdd(exportState, convention="NetCDF", purpose="FV3", &
+ attrList=(/"ngrids"/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_AttributeSet(exportState, convention="NetCDF", purpose="FV3", &
+ name="ngrids", value=ngrids, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+! Add top_parent_is_global Attribute to the exportState
+ call ESMF_AttributeAdd(exportState, convention="NetCDF", purpose="FV3", &
+ attrList=(/"top_parent_is_global"/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_AttributeSet(exportState, convention="NetCDF", purpose="FV3", &
+ name="top_parent_is_global", value=top_parent_is_global, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
! Add dimension Attributes to Grid
- call ESMF_AttributeAdd(fcstGrid, convention="NetCDF", purpose="FV3", &
+ do n=1,ngrids
+ if (ESMF_GridCompIsPetLocal(fcstGridComp(n), rc=rc)) then
+
+ call ESMF_AttributeAdd(fcstGrid(n), convention="NetCDF", purpose="FV3", &
attrList=(/"ESMF:gridded_dim_labels"/), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call ESMF_AttributeSet(fcstGrid, convention="NetCDF", purpose="FV3", &
+ call ESMF_AttributeSet(fcstGrid(n), convention="NetCDF", purpose="FV3", &
name="ESMF:gridded_dim_labels", valueList=(/"grid_xt", "grid_yt"/), rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ endif
+ end do
+
! Add time Attribute to the exportState
call ESMF_AttributeAdd(exportState, convention="NetCDF", purpose="FV3", &
attrList=(/ "time ", &
@@ -653,11 +811,10 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
dateS="hours since "//dateSY//'-'//dateSM//'-'//dateSD//' '//dateSH//':'// &
dateSN//":"//dateSS
- if (mype == 0) write(0,*)'dateS=',trim(dateS),'date_init=',date_init
+ if (mype == 0) write(*,*)'dateS=',trim(dateS),'date_init=',date_init
call ESMF_AttributeSet(exportState, convention="NetCDF", purpose="FV3", &
name="time:units", value=trim(dateS), rc=rc)
-! name="time:units", value="hours since 2016-10-03 00:00:00", rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
call ESMF_AttributeSet(exportState, convention="NetCDF", purpose="FV3", &
@@ -679,49 +836,73 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
! Create FieldBundle for Fields that need to be regridded bilinear
if( quilting ) then
+ allocate(fieldbundle(ngrids))
+ nbdlphys = 2
+ allocate(fieldbundlephys(nbdlphys,ngrids))
+
+ do n=1,ngrids
+ bundle_grid=''
+ if (ngrids > 1 .and. n >= 2) then
+ write(bundle_grid,'(A5,I2.2,A1)') '.nest', n, '.'
+ endif
+
do i=1,num_files
!
- name_FB = filename_base(i)
+ tempState = ESMF_StateCreate(rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ name_FB = trim(filename_base(i)) // trim(bundle_grid)
!
if( i==1 ) then
! for dyn
name_FB1 = trim(name_FB)//'_bilinear'
- fieldbundle = ESMF_FieldBundleCreate(name=trim(name_FB1),rc=rc)
- if (mype == 0) write(0,*)'af create fcst fieldbundle, name=',trim(name_FB),'rc=',rc
+ fieldbundle(n) = ESMF_FieldBundleCreate(name=trim(name_FB1),rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_AttributeAdd(fieldbundle(n), convention="NetCDF", purpose="FV3", &
+ attrList=(/"grid_id"/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_AttributeSet(fieldbundle(n), convention="NetCDF", purpose="FV3", &
+ name="grid_id", value=n, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call fv_dyn_bundle_setup(atm_int_state%Atm%axes, &
- fieldbundle, fcstGrid, quilting, rc=rc)
+ call ESMF_StateAdd(tempState, (/fieldbundle(n)/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- ! Add the field to the importState so parent can connect to it
- call ESMF_StateAdd(exportState, (/fieldbundle/), rc=rc)
+ call ESMF_GridCompInitialize(fcstGridComp(n), importState=tempState,&
+ exportState=exportState, phase=1, userrc=urc, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
else if( i==2 ) then
! for phys
- nbdlphys = 2
- allocate(fieldbundlephys(nbdlphys))
do j=1, nbdlphys
if( j==1 ) then
name_FB1 = trim(name_FB)//'_nearest_stod'
else
name_FB1 = trim(name_FB)//'_bilinear'
endif
- fieldbundlephys(j) = ESMF_FieldBundleCreate(name=trim(name_FB1),rc=rc)
- if (mype == 0) write(0,*)'af create fcst fieldbundle, name=',trim(name_FB1),'rc=',rc
+ fieldbundlephys(j,n) = ESMF_FieldBundleCreate(name=trim(name_FB1),rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- enddo
-!
- call fv_phys_bundle_setup(atm_int_state%Atm%diag, atm_int_state%Atm%axes, &
- fieldbundlephys, fcstGrid, quilting, nbdlphys)
-!
- ! Add the field to the importState so parent can connect to it
- do j=1,nbdlphys
- call ESMF_StateAdd(exportState, (/fieldbundlephys(j)/), rc=rc)
+
+ call ESMF_AttributeAdd(fieldbundlephys(j,n), convention="NetCDF", purpose="FV3", &
+ attrList=(/"grid_id"/), rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_AttributeSet(fieldbundlephys(j,n), convention="NetCDF", purpose="FV3", &
+ name="grid_id", value=n, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call ESMF_StateAdd(tempState, (/fieldbundlephys(j,n)/), rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
enddo
+ call ESMF_GridCompInitialize(fcstGridComp(n), importState=tempState,&
+ exportState=exportState, phase=2, userrc=urc, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+ if (ESMF_LogFoundError(rcToCheck=urc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__, rcToReturn=rc)) return
+
else
write(0,*)' unknown name_FB ', trim(name_FB)
@@ -729,7 +910,11 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
endif
!
- enddo
+ call ESMF_StateDestroy(tempState, noGarbage=.true., rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ enddo ! num_files
+ enddo ! ngrids
!end qulting
endif
@@ -737,16 +922,8 @@ subroutine fcst_initialize(fcst_comp, importState, exportState, clock, rc)
call get_atmos_model_ungridded_dim(nlev=numLevels, &
nsoillev=numSoilLayers, &
ntracers=numTracers)
-!
-!-----------------------------------------------------------------------
-!
- IF(rc /= ESMF_SUCCESS) THEN
- WRITE(0,*)"FAIL: Fcst_Initialize."
-! ELSE
-! WRITE(0,*)"PASS: Fcst_Initialize."
- ENDIF
-!
- if (mype == 0) write(0,*)'in fcst,init total time: ', mpi_wtime() - timeis
+
+ if (mype == 0) write(*,*)'fcst_initialize total time: ', mpi_wtime() - timeis
!
!-----------------------------------------------------------------------
!
@@ -767,30 +944,22 @@ subroutine fcst_run_phase_1(fcst_comp, importState, exportState,clock,rc)
type(ESMF_Clock) :: clock
integer,intent(out) :: rc
!
-!-----------------------------------------------------------------------
!*** local variables
!
- integer :: i,j, mype, na, date(6)
- character(20) :: compname
-
- type(ESMF_Time) :: currtime
+ integer :: mype, na
integer(kind=ESMF_KIND_I8) :: ntimestep_esmf
- character(len=64) :: timestamp
-!
-!-----------------------------------------------------------------------
-!
- real(kind=8) :: mpi_wtime, tbeg1
+ real(kind=8) :: mpi_wtime, tbeg1
!
!-----------------------------------------------------------------------
!***********************************************************************
!-----------------------------------------------------------------------
!
tbeg1 = mpi_wtime()
- rc = esmf_success
+ rc = ESMF_SUCCESS
!
!-----------------------------------------------------------------------
!
- call ESMF_GridCompGet(fcst_comp, name=compname, localpet=mype, rc=rc)
+ call ESMF_GridCompGet(fcst_comp, localpet=mype, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
!
call ESMF_ClockGet(clock, advanceCount=NTIMESTEP_ESMF, rc=rc)
@@ -801,31 +970,21 @@ subroutine fcst_run_phase_1(fcst_comp, importState, exportState,clock,rc)
!-----------------------------------------------------------------------
! *** call fcst integration subroutines
- call get_date (atm_int_state%Time_atmos, date(1), date(2), date(3), &
- date(4), date(5), date(6))
- atm_int_state%Time_atmos = atm_int_state%Time_atmos + atm_int_state%Time_step_atmos
-
- call update_atmos_model_dynamics (atm_int_state%Atm)
+ call update_atmos_model_dynamics (Atmos)
- call update_atmos_radiation_physics (atm_int_state%Atm)
+ call update_atmos_radiation_physics (Atmos)
- call atmos_model_exchange_phase_1 (atm_int_state%Atm, rc=rc)
+ call atmos_model_exchange_phase_1 (Atmos, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!-----------------------------------------------------------------------
-!
-! IF(RC /= ESMF_SUCCESS) THEN
-! if(mype==0) WRITE(0,*)"FAIL: fcst_RUN"
-! ELSE
- if(mype==0) WRITE(*,*)"PASS: fcstRUN phase 1, na = ",na, ' time is ', mpi_wtime()-tbeg1
-! ENDIF
+ if (mype == 0) write(*,*)"PASS: fcstRUN phase 1, na = ",na, ' time is ', mpi_wtime()-tbeg1
!
!-----------------------------------------------------------------------
!
end subroutine fcst_run_phase_1
!
!-----------------------------------------------------------------------
-!&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+!#######################################################################
!-----------------------------------------------------------------------
!
subroutine fcst_run_phase_2(fcst_comp, importState, exportState,clock,rc)
@@ -839,78 +998,78 @@ subroutine fcst_run_phase_2(fcst_comp, importState, exportState,clock,rc)
type(ESMF_Clock) :: clock
integer,intent(out) :: rc
!
-!-----------------------------------------------------------------------
!*** local variables
!
- integer :: i,j, mype, na, date(6), seconds
- character(20) :: compname
-
- type(time_type) :: restart_inctime
- type(ESMF_Time) :: currtime
+ integer :: mype, na, date(6), seconds
integer(kind=ESMF_KIND_I8) :: ntimestep_esmf
character(len=64) :: timestamp
-!
-!-----------------------------------------------------------------------
-!
- real(kind=8) :: mpi_wtime, tbeg1
+ integer :: unit
+ real(kind=8) :: mpi_wtime, tbeg1
!
!-----------------------------------------------------------------------
!***********************************************************************
!-----------------------------------------------------------------------
!
tbeg1 = mpi_wtime()
- rc = esmf_success
+ rc = ESMF_SUCCESS
!
!-----------------------------------------------------------------------
!
- call ESMF_GridCompGet(fcst_comp, name=compname, localpet=mype, rc=rc)
+ call ESMF_GridCompGet(fcst_comp, localpet=mype, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!
+
call ESMF_ClockGet(clock, advanceCount=NTIMESTEP_ESMF, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
na = NTIMESTEP_ESMF
- if (mype == 0) write(0,*)'in fcst run phase 2, na=',na
!
!-----------------------------------------------------------------------
! *** call fcst integration subroutines
- call atmos_model_exchange_phase_2 (atm_int_state%Atm, rc=rc)
+ call atmos_model_exchange_phase_2 (Atmos, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
- call update_atmos_model_state (atm_int_state%Atm, rc=rc)
+ call update_atmos_model_state (Atmos, rc=rc)
if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
-!--- intermediate restart
- if (atm_int_state%intrm_rst>0) then
- if (na /= atm_int_state%num_atmos_calls-1) then
- call get_time(atm_int_state%Time_atmos - atm_int_state%Time_init, seconds)
+ !--- intermediate restart
+ if (intrm_rst>0) then
+ if (na /= num_atmos_calls-1) then
+ call get_time(Atmos%Time - Atmos%Time_init, seconds)
if (ANY(frestart(:) == seconds)) then
- if (mype == 0) write(0,*)'write out restart at na=',na,' seconds=',seconds, &
- 'integration lenght=',na*dt_atmos/3600.
- timestamp = date_to_string (atm_int_state%Time_atmos)
- call atmos_model_restart(atm_int_state%Atm, timestamp)
+ if (mype == 0) write(*,*)'write out restart at na=',na,' seconds=',seconds, &
+ 'integration length=',na*dt_atmos/3600.
+
+ timestamp = date_to_string (Atmos%Time)
+ call atmos_model_restart(Atmos, timestamp)
call write_stoch_restart_atm('RESTART/'//trim(timestamp)//'.atm_stoch.res.nc')
- call wrt_atmres_timestamp(atm_int_state,timestamp)
+ !----- write restart file ------
+ if (mpp_pe() == mpp_root_pe())then
+ call get_date (Atmos%Time, date(1), date(2), date(3), &
+ date(4), date(5), date(6))
+ call mpp_open( unit, 'RESTART/'//trim(timestamp)//'.coupler.res', nohdrs=.TRUE. )
+ write( unit, '(i6,8x,a)' )calendar_type, &
+ '(Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4)'
+
+ write( unit, '(6i6,8x,a)' )date_init, &
+ 'Model start time: year, month, day, hour, minute, second'
+ write( unit, '(6i6,8x,a)' )date, &
+ 'Current model time: year, month, day, hour, minute, second'
+ call mpp_close(unit)
+ endif
endif
endif
endif
-!
-!-----------------------------------------------------------------------
-!
-! IF(RC /= ESMF_SUCCESS) THEN
-! if(mype==0) WRITE(0,*)"FAIL: fcst_RUN"
-! ELSE
- if(mype==0) WRITE(*,*)"PASS: fcstRUN phase 2, na = ",na, ' time is ', mpi_wtime()-tbeg1
-! ENDIF
+
+ if (mype == 0) write(*,*)"PASS: fcstRUN phase 2, na = ",na, ' time is ', mpi_wtime()-tbeg1
!
!-----------------------------------------------------------------------
!
end subroutine fcst_run_phase_2
!
!-----------------------------------------------------------------------
-!&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+!#######################################################################
!-----------------------------------------------------------------------
!
subroutine fcst_finalize(fcst_comp, importState, exportState,clock,rc)
@@ -919,45 +1078,33 @@ subroutine fcst_finalize(fcst_comp, importState, exportState,clock,rc)
!*** finalize the forecast grid component.
!-----------------------------------------------------------------------
!
- type(ESMF_GridComp) :: fcst_comp
- type(ESMF_State) :: importState, exportState
- type(ESMF_Clock) :: clock
- integer,intent(out) :: rc
+ type(ESMF_GridComp) :: fcst_comp
+ type(ESMF_State) :: importState, exportState
+ type(ESMF_Clock) :: clock
+ integer,intent(out) :: rc
!
!*** local variables
!
- integer :: unit
- integer,dimension(6) :: date
-
- real(8) mpi_wtime, tfs, tfe
+ integer :: mype
+ integer :: unit
+ integer,dimension(6) :: date
+ real(kind=8) :: mpi_wtime, tbeg1
!
!-----------------------------------------------------------------------
!***********************************************************************
!-----------------------------------------------------------------------
!
- tfs = mpi_wtime()
- rc = ESMF_SUCCESS
-!
-!-----------------------------------------------------------------------
-!*** retrieve the fcst component's esmf internal state
-!-----------------------------------------------------------------------
-!
- call ESMF_GridCompGetInternalState(fcst_comp, wrap, rc)
- atm_int_state => wrap%ptr
-!
-!-----------------------------------------------------------------------
-!
- call atmos_model_end (atm_int_state%atm)
-!
-!*** check time versus expected ending time
+ tbeg1 = mpi_wtime()
+ rc = ESMF_SUCCESS
- if (atm_int_state%Time_atmos /= atm_int_state%Time_end) &
- call error_mesg ('program coupler', &
- 'final time does not match expected ending time', WARNING)
+ call ESMF_GridCompGet(fcst_comp, localpet=mype, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return
+
+ call atmos_model_end (Atmos)
!*** write restart file
if( restart_endfcst ) then
- call get_date (atm_int_state%Time_atmos, date(1), date(2), date(3), &
+ call get_date (Atmos%Time, date(1), date(2), date(3), &
date(4), date(5), date(6))
call mpp_open( unit, 'RESTART/coupler.res', nohdrs=.TRUE. )
if (mpp_pe() == mpp_root_pe())then
@@ -971,56 +1118,18 @@ subroutine fcst_finalize(fcst_comp, importState, exportState,clock,rc)
endif
call mpp_close(unit)
endif
-!
- call diag_manager_end(atm_int_state%Time_atmos )
+
+ call diag_manager_end (Atmos%Time)
call fms_end
+
+ if (mype == 0) write(*,*)'fcst_finalize total time: ', mpi_wtime() - tbeg1
!
!-----------------------------------------------------------------------
-!
- IF(RC /= ESMF_SUCCESS)THEN
- WRITE(0,*)'FAIL: Write_Finalize.'
-! ELSE
-! WRITE(0,*)'PASS: Write_Finalize.'
- ENDIF
-!
- tfe = mpi_wtime()
-! print *,'fms end time: ', tfe-tfs
-!-----------------------------------------------------------------------
!
end subroutine fcst_finalize
!
!#######################################################################
-!-- change name from coupler_res to wrt_res_stamp to avoid confusion,
-!-- here we only write out atmos restart time stamp
-!
- subroutine wrt_atmres_timestamp(atm_int_state,timestamp)
- type(atmos_internalstate_type), intent(in) :: atm_int_state
- character(len=32), intent(in) :: timestamp
-
- integer :: unit, date(6)
-
-!----- compute current date ------
-
- call get_date (atm_int_state%Time_atmos, date(1), date(2), date(3), &
- date(4), date(5), date(6))
-
-!----- write restart file ------
-
- if (mpp_pe() == mpp_root_pe())then
- call mpp_open( unit, 'RESTART/'//trim(timestamp)//'.coupler.res', nohdrs=.TRUE. )
- write( unit, '(i6,8x,a)' )calendar_type, &
- '(Calendar: no_calendar=0, thirty_day_months=1, julian=2, gregorian=3, noleap=4)'
-
- write( unit, '(6i6,8x,a)' )date_init, &
- 'Model start time: year, month, day, hour, minute, second'
- write( unit, '(6i6,8x,a)' )date, &
- 'Current model time: year, month, day, hour, minute, second'
- call mpp_close(unit)
- endif
- end subroutine wrt_atmres_timestamp
-!
-!#######################################################################
!-- write forecast grid to NetCDF file for diagnostics
!
subroutine wrt_fcst_grid(grid, fileName, relaxedflag, regridArea, rc)
@@ -1030,7 +1139,6 @@ subroutine wrt_fcst_grid(grid, fileName, relaxedflag, regridArea, rc)
logical, intent(in), optional :: regridArea
integer, intent(out) :: rc
!
-!-----------------------------------------------------------------------
!*** local variables
!
logical :: ioCapable
@@ -1040,7 +1148,6 @@ subroutine wrt_fcst_grid(grid, fileName, relaxedflag, regridArea, rc)
type(ESMF_Array) :: array
type(ESMF_ArrayBundle) :: arraybundle
logical :: isPresent
- integer :: stat
logical :: hasCorners
logical :: lRegridArea
type(ESMF_Field) :: areaField
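Note: wrt_fcst_grid now receives a single grid (fcstGrid(mygrid)), but, judging from its local variables, its body still collects the grid's coordinate Arrays into an ESMF_ArrayBundle and writes them to NetCDF. A minimal stand-alone sketch of that pattern is given below; the subroutine name, the "lon_center"/"lat_center" labels, and the simplified error handling are illustrative assumptions, not code from this file.

  subroutine sketch_write_grid_coords(grid, fileName, rc)
    use ESMF
    implicit none
    type(ESMF_Grid),  intent(in)  :: grid
    character(len=*), intent(in)  :: fileName
    integer,          intent(out) :: rc

    type(ESMF_Array)       :: lonArray, latArray
    type(ESMF_ArrayBundle) :: bundle

    ! Retrieve the center-stagger coordinate Arrays (coordDim 1 = lon, 2 = lat).
    call ESMF_GridGetCoord(grid, coordDim=1, staggerLoc=ESMF_STAGGERLOC_CENTER, &
                           array=lonArray, rc=rc)
    if (rc /= ESMF_SUCCESS) return
    call ESMF_GridGetCoord(grid, coordDim=2, staggerLoc=ESMF_STAGGERLOC_CENTER, &
                           array=latArray, rc=rc)
    if (rc /= ESMF_SUCCESS) return

    ! Give the Arrays recognizable variable names in the output file (labels assumed).
    call ESMF_ArraySet(lonArray, name="lon_center", rc=rc)
    if (rc /= ESMF_SUCCESS) return
    call ESMF_ArraySet(latArray, name="lat_center", rc=rc)
    if (rc /= ESMF_SUCCESS) return

    ! Bundle the coordinate Arrays and write them out in one call.
    bundle = ESMF_ArrayBundleCreate(arrayList=(/lonArray, latArray/), rc=rc)
    if (rc /= ESMF_SUCCESS) return
    call ESMF_ArrayBundleWrite(bundle, fileName=trim(fileName), overwrite=.true., rc=rc)
    if (rc /= ESMF_SUCCESS) return
    call ESMF_ArrayBundleDestroy(bundle, rc=rc)
  end subroutine sketch_write_grid_coords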
diff --git a/module_fv3_config.F90 b/module_fv3_config.F90
index 53963b488..64522ec8e 100644
--- a/module_fv3_config.F90
+++ b/module_fv3_config.F90
@@ -2,7 +2,7 @@
module module_fv3_config
!------------------------------------------------------------------------
!
-!*** fv3 configure variablse from model_configure
+!*** fv3 configure variables from model_configure
!
! revision history
! 01/2017 Jun Wang Initial code
@@ -14,21 +14,15 @@ module module_fv3_config
implicit none
!
integer :: nfhout, nfhout_hf, nsout, dt_atmos
- integer :: nfhmax_hf, first_kdt
+ integer :: first_kdt
integer :: fcst_mpi_comm, fcst_ntasks
- real :: nfhmax
- type(ESMF_Alarm) :: alarm_output_hf, alarm_output
- type(ESMF_TimeInterval) :: output_hfmax
- type(ESMF_TimeInterval) :: output_interval,output_interval_hf
!
logical :: cplprint_flag
logical :: quilting, output_1st_tstep_rst
- logical :: force_date_from_configure
logical :: restart_endfcst
!
real,dimension(:),allocatable :: output_fh
character(esmf_maxstr),dimension(:),allocatable :: filename_base
character(17) :: calendar=' '
- integer :: calendar_type = -99
!
end module module_fv3_config
diff --git a/stochastic_physics/stochastic_physics_wrapper.F90 b/stochastic_physics/stochastic_physics_wrapper.F90
index 45e8532a8..ae67c0daf 100644
--- a/stochastic_physics/stochastic_physics_wrapper.F90
+++ b/stochastic_physics/stochastic_physics_wrapper.F90
@@ -12,6 +12,7 @@ module stochastic_physics_wrapper_mod
real(kind=kind_phys), dimension(:,:,:), allocatable, save :: skebu_wts
real(kind=kind_phys), dimension(:,:,:), allocatable, save :: skebv_wts
real(kind=kind_phys), dimension(:,:,:), allocatable, save :: sfc_wts
+ real(kind=kind_phys), dimension(:,:,:,:), allocatable, save :: spp_wts
logical, save :: is_initialized = .false.
integer, save :: lsoil = -999
@@ -78,7 +79,7 @@ subroutine stochastic_physics_wrapper (GFS_Control, GFS_Data, Atm_block, ierr)
type(block_control_type), intent(inout) :: Atm_block
integer, intent(out) :: ierr
- integer :: nthreads, nb, levs, maxblk, nblks
+ integer :: nthreads, nb, levs, maxblk, nblks, n
logical :: param_update_flag
#ifdef _OPENMP
@@ -96,7 +97,7 @@ subroutine stochastic_physics_wrapper (GFS_Control, GFS_Data, Atm_block, ierr)
initalize_stochastic_physics: if (.not. is_initialized) then
- if (GFS_Control%do_sppt .OR. GFS_Control%do_shum .OR. GFS_Control%do_skeb .OR. (GFS_Control%lndp_type > 0) ) then
+ if (GFS_Control%do_sppt .OR. GFS_Control%do_shum .OR. GFS_Control%do_skeb .OR. (GFS_Control%lndp_type > 0) .OR. GFS_Control%do_spp) then
allocate(xlat(1:nblks,maxblk))
allocate(xlon(1:nblks,maxblk))
do nb=1,nblks
@@ -108,6 +109,7 @@ subroutine stochastic_physics_wrapper (GFS_Control, GFS_Data, Atm_block, ierr)
GFS_Control%input_nml_file, GFS_Control%fn_nml, GFS_Control%nlunit, xlon, xlat, GFS_Control%do_sppt, GFS_Control%do_shum, &
GFS_Control%do_skeb, GFS_Control%lndp_type, GFS_Control%n_var_lndp, GFS_Control%use_zmtnblck, GFS_Control%skeb_npass, &
GFS_Control%lndp_var_list, GFS_Control%lndp_prt_list, &
+ GFS_Control%n_var_spp, GFS_Control%spp_var_list, GFS_Control%spp_prt_list, GFS_Control%do_spp, &
GFS_Control%ak, GFS_Control%bk, nthreads, GFS_Control%master, GFS_Control%communicator, ierr)
if (ierr/=0) then
write(6,*) 'call to init_stochastic_physics failed'
@@ -124,6 +126,23 @@ subroutine stochastic_physics_wrapper (GFS_Control, GFS_Data, Atm_block, ierr)
allocate(skebu_wts(1:nblks,maxblk,1:levs))
allocate(skebv_wts(1:nblks,maxblk,1:levs))
end if
+ if ( GFS_Control%do_spp ) then
+ allocate(spp_wts(1:nblks,maxblk,1:levs,1:GFS_Control%n_var_spp))
+ do n=1,GFS_Control%n_var_spp
+ select case (trim(GFS_Control%spp_var_list(n)))
+ case('pbl')
+ GFS_Control%spp_pbl = 1
+ case('sfc')
+ GFS_Control%spp_sfc = 1
+ case('mp')
+ GFS_Control%spp_mp = 7
+ case('rad')
+ GFS_Control%spp_rad = 1
+ case('gwd')
+ GFS_Control%spp_gwd = 1
+ end select
+ end do
+ end if
if ( GFS_Control%lndp_type == 2 ) then ! this scheme updates through forecast
allocate(sfc_wts(1:nblks,maxblk,1:GFS_Control%n_var_lndp))
end if
@@ -154,7 +173,8 @@ subroutine stochastic_physics_wrapper (GFS_Control, GFS_Data, Atm_block, ierr)
allocate(sfc_wts(1:nblks, maxblk, GFS_Control%n_var_lndp))
call run_stochastic_physics(levs, GFS_Control%kdt, GFS_Control%fhour, GFS_Control%blksz, &
sppt_wts=sppt_wts, shum_wts=shum_wts, skebu_wts=skebu_wts, &
- skebv_wts=skebv_wts, sfc_wts=sfc_wts, nthreads=nthreads)
+ skebv_wts=skebv_wts, sfc_wts=sfc_wts, &
+ spp_wts=spp_wts, nthreads=nthreads)
! Copy contiguous data back
do nb=1,nblks
GFS_Data(nb)%Coupling%sfc_wts(:,:) = sfc_wts(nb,1:GFS_Control%blksz(nb),:)
@@ -188,10 +208,10 @@ subroutine stochastic_physics_wrapper (GFS_Control, GFS_Data, Atm_block, ierr)
is_initialized = .true.
else initalize_stochastic_physics
- if (GFS_Control%do_sppt .OR. GFS_Control%do_shum .OR. GFS_Control%do_skeb .OR. (GFS_Control%lndp_type == 2) ) then
+ if (GFS_Control%do_sppt .OR. GFS_Control%do_shum .OR. GFS_Control%do_skeb .OR. (GFS_Control%lndp_type == 2) .OR. GFS_Control%do_spp) then
call run_stochastic_physics(levs, GFS_Control%kdt, GFS_Control%fhour, GFS_Control%blksz, &
sppt_wts=sppt_wts, shum_wts=shum_wts, skebu_wts=skebu_wts, skebv_wts=skebv_wts, sfc_wts=sfc_wts, &
- nthreads=nthreads)
+ spp_wts=spp_wts, nthreads=nthreads)
! Copy contiguous data back
if (GFS_Control%do_sppt) then
do nb=1,nblks
@@ -209,6 +229,32 @@ subroutine stochastic_physics_wrapper (GFS_Control, GFS_Data, Atm_block, ierr)
GFS_Data(nb)%Coupling%skebv_wts(:,:) = skebv_wts(nb,1:GFS_Control%blksz(nb),:)
end do
end if
+ if (GFS_Control%do_spp) then
+ do n=1,GFS_Control%n_var_spp
+ select case (trim(GFS_Control%spp_var_list(n)))
+ case('pbl')
+ do nb=1,Atm_block%nblks
+ GFS_Data(nb)%Coupling%spp_wts_pbl(:,:) = spp_wts(nb,1:GFS_Control%blksz(nb),:,n)
+ end do
+ case('sfc')
+ do nb=1,Atm_block%nblks
+ GFS_Data(nb)%Coupling%spp_wts_sfc(:,:) = spp_wts(nb,1:GFS_Control%blksz(nb),:,n)
+ end do
+ case('mp')
+ do nb=1,Atm_block%nblks
+ GFS_Data(nb)%Coupling%spp_wts_mp(:,:) = spp_wts(nb,1:GFS_Control%blksz(nb),:,n)
+ end do
+ case('gwd')
+ do nb=1,Atm_block%nblks
+ GFS_Data(nb)%Coupling%spp_wts_gwd(:,:) = spp_wts(nb,1:GFS_Control%blksz(nb),:,n)
+ end do
+ case('rad')
+ do nb=1,Atm_block%nblks
+ GFS_Data(nb)%Coupling%spp_wts_rad(:,:) = spp_wts(nb,1:GFS_Control%blksz(nb),:,n)
+ end do
+ end select
+ end do
+ end if
if (GFS_Control%lndp_type == 2) then ! save wts, and apply lndp scheme
do nb=1,nblks
GFS_Data(nb)%Coupling%sfc_wts(:,:) = sfc_wts(nb,1:GFS_Control%blksz(nb),:)
@@ -347,7 +393,7 @@ subroutine stochastic_physics_wrapper_end (GFS_Control)
type(GFS_control_type), intent(inout) :: GFS_Control
- if (GFS_Control%do_sppt .OR. GFS_Control%do_shum .OR. GFS_Control%do_skeb .OR. (GFS_Control%lndp_type > 0) ) then
+ if (GFS_Control%do_sppt .OR. GFS_Control%do_shum .OR. GFS_Control%do_skeb .OR. (GFS_Control%lndp_type > 0) .OR. GFS_Control%do_spp) then
if (allocated(xlat)) deallocate(xlat)
if (allocated(xlon)) deallocate(xlon)
if (GFS_Control%do_sppt) then
@@ -360,6 +406,9 @@ subroutine stochastic_physics_wrapper_end (GFS_Control)
if (allocated(skebu_wts)) deallocate(skebu_wts)
if (allocated(skebv_wts)) deallocate(skebv_wts)
end if
+ if (GFS_Control%do_spp) then
+ if (allocated(spp_wts)) deallocate(spp_wts)
+ end if
if ( GFS_Control%lndp_type == 2 ) then ! this scheme updates through forecast
lsoil = -999
if (allocated(sfc_wts)) deallocate(sfc_wts)
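Note: the new spp_wts buffer is dimensioned (block, point-in-block, level, perturbation pattern); after run_stochastic_physics fills it, the loop above copies each pattern's slice into the per-block coupling array selected by spp_var_list(n). A small stand-alone sketch of that copy-back follows, with dummy sizes and a single target array standing in for GFS_Data(nb)%Coupling%spp_wts_pbl; all names and sizes here are illustrative, not taken from the model.

  program spp_copyback_sketch
    implicit none
    integer, parameter :: nblks = 4, maxblk = 8, levs = 5, n_var_spp = 2
    ! blksz(nb) is the number of active points in block nb (<= maxblk)
    integer :: blksz(nblks) = (/8, 8, 8, 6/)
    real    :: spp_wts(nblks, maxblk, levs, n_var_spp)  ! filled by the pattern generator
    real    :: spp_wts_pbl(maxblk, levs)                ! per-block coupling array (one reused here for brevity)
    integer :: nb, n

    call random_number(spp_wts)   ! stand-in for run_stochastic_physics

    do n = 1, n_var_spp
      ! In the wrapper the target array is chosen by spp_var_list(n); only 'pbl' is shown.
      if (n == 1) then
        do nb = 1, nblks
          ! Copy only the active points of each block; trailing points stay untouched.
          spp_wts_pbl(1:blksz(nb), :) = spp_wts(nb, 1:blksz(nb), :, n)
        end do
      end if
    end do

    print *, 'copied pattern 1 for', nblks, 'blocks'
  end program spp_copyback_sketch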
diff --git a/time_utils.F90 b/time_utils.F90
deleted file mode 100644
index 69aafcb60..000000000
--- a/time_utils.F90
+++ /dev/null
@@ -1,170 +0,0 @@
-module time_utils_mod
-
- use fms_mod, only: uppercase
- use mpp_mod, only: mpp_error, FATAL
- use time_manager_mod, only: time_type, set_time, set_date, get_date
- use time_manager_mod, only: GREGORIAN, JULIAN, NOLEAP, THIRTY_DAY_MONTHS, NO_CALENDAR
- use time_manager_mod, only: fms_get_calendar_type => get_calendar_type
- use ESMF
-
- implicit none
- private
-
- !-------------------- interface blocks ---------------------
- interface fms2esmf_cal
- module procedure fms2esmf_cal_c
- module procedure fms2esmf_cal_i
- end interface fms2esmf_cal
- interface esmf2fms_time
- module procedure esmf2fms_time_t
- module procedure esmf2fms_timestep
- end interface esmf2fms_time
-
- public fms2esmf_cal
- public esmf2fms_time
- public fms2esmf_time
- public string_to_date
-
- contains
-
- !-------------------- module code ---------------------
-
- function fms2esmf_cal_c(calendar)
-! ! Return Value:
- type(ESMF_CALKIND_FLAG) :: fms2esmf_cal_c
-! ! Arguments:
- character(len=*), intent(in) :: calendar
-
- select case( uppercase(trim(calendar)) )
- case( 'GREGORIAN' )
- fms2esmf_cal_c = ESMF_CALKIND_GREGORIAN
- case( 'JULIAN' )
- fms2esmf_cal_c = ESMF_CALKIND_JULIAN
- case( 'NOLEAP' )
- fms2esmf_cal_c = ESMF_CALKIND_NOLEAP
- case( 'THIRTY_DAY' )
- fms2esmf_cal_c = ESMF_CALKIND_360DAY
- case( 'NO_CALENDAR' )
- fms2esmf_cal_c = ESMF_CALKIND_NOCALENDAR
- case default
- call mpp_error(FATAL, &
- 'ocean_solo: ocean_solo_nml entry calendar must be one of GREGORIAN|JULIAN|NOLEAP|THIRTY_DAY|NO_CALENDAR.' )
- end select
- end function fms2esmf_cal_c
-
- function fms2esmf_cal_i(calendar)
-! ! Return Value:
- type(ESMF_CALKIND_FLAG) :: fms2esmf_cal_i
-! ! Arguments:
- integer, intent(in) :: calendar
-
- select case(calendar)
- case(THIRTY_DAY_MONTHS)
- fms2esmf_cal_i = ESMF_CALKIND_360DAY
- case(GREGORIAN)
- fms2esmf_cal_i = ESMF_CALKIND_GREGORIAN
- case(JULIAN)
- fms2esmf_cal_i = ESMF_CALKIND_JULIAN
- case(NOLEAP)
- fms2esmf_cal_i = ESMF_CALKIND_NOLEAP
- case(NO_CALENDAR)
- fms2esmf_cal_i = ESMF_CALKIND_NOCALENDAR
- end select
- end function fms2esmf_cal_i
-
- function esmf2fms_time_t(time)
- ! Return Value
- type(Time_type) :: esmf2fms_time_t
- ! Input Arguments
- type(ESMF_Time), intent(in) :: time
- ! Local Variables
- integer :: yy, mm, dd, h, m, s
- type(ESMF_CALKIND_FLAG) :: calkind
-
- integer :: rc
-
- call ESMF_TimeGet(time, yy=yy, mm=mm, dd=dd, h=h, m=m, s=s, &
- calkindflag=calkind, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, &
- file=__FILE__)) &
- return ! bail out
-
- esmf2fms_time_t = Set_date(yy, mm, dd, h, m, s)
-
- end function esmf2fms_time_t
-
- function esmf2fms_timestep(timestep)
- ! Return Value
- type(Time_type) :: esmf2fms_timestep
- ! Input Arguments
- type(ESMF_TimeInterval), intent(in):: timestep
- ! Local Variables
- integer :: s
- type(ESMF_CALKIND_FLAG) :: calkind
-
- integer :: rc
-
- call ESMF_TimeIntervalGet(timestep, s=s, calkindflag=calkind, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, &
- file=__FILE__)) &
- return ! bail out
-
- esmf2fms_timestep = set_time(s, 0)
-
- end function esmf2fms_timestep
-
- function fms2esmf_time(time, calkind)
- ! Return Value
- type(ESMF_Time) :: fms2esmf_time
- ! Input Arguments
- type(Time_type), intent(in) :: time
- type(ESMF_CALKIND_FLAG), intent(in), optional :: calkind
- ! Local Variables
- integer :: yy, mm, d, h, m, s
- type(ESMF_CALKIND_FLAG) :: l_calkind
-
- integer :: rc
-
- integer :: yy1, mm1, d1, h1, m1, s1
-
- if(present(calkind)) then
- l_calkind = calkind
- else
- l_calkind = fms2esmf_cal(fms_get_calendar_type())
- endif
-
- call get_date(time, yy, mm, d, h, m, s)
- print *,'in fms2esmf_time,time=',yy,mm,d,h,m,s,'calendar_type=', &
- fms_get_calendar_type()
-
- call ESMF_TimeSet(fms2esmf_time, yy=yy, mm=mm, d=d, h=h, m=m, s=s, rc=rc)
-! call ESMF_TimeSet(fms2esmf_time, yy=yy, mm=mm, d=d, h=h, m=m, s=s, &
-! calkindflag=l_calkind, rc=rc)
- if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, &
- line=__LINE__, &
- file=__FILE__)) &
- return ! bail out
-
-!test
- call ESMF_TimeGet(fms2esmf_time,yy=yy1, mm=mm1, d=d1, h=h1, m=m1, s=s1,rc=rc)
- print *,'in fms2esmf_time,test time=',yy1,mm1,d1,h1,m1,s1
-
- end function fms2esmf_time
-
- function string_to_date(string, rc)
- character(len=15), intent(in) :: string
- integer, intent(out), optional :: rc
- type(time_type) :: string_to_date
-
- integer :: yr,mon,day,hr,min,sec
-
- if(present(rc)) rc = ESMF_SUCCESS
-
- read(string, '(I4.4,I2.2,I2.2,".",I2.2,I2.2,I2.2)') yr, mon, day, hr, min, sec
- string_to_date = set_date(yr, mon, day, hr, min, sec)
-
- end function string_to_date
-
-end module time_utils_mod
diff --git a/upp b/upp
index c939eae6b..0dc3c0c1d 160000
--- a/upp
+++ b/upp
@@ -1 +1 @@
-Subproject commit c939eae6bacb3c2a93753bba54b8646f32a0a7ab
+Subproject commit 0dc3c0c1dbdcdc5025dff0c6b06b16aa2a7ddda9