diff --git a/.github/workflows/documentation-and-style.yml b/.github/workflows/documentation-and-style.yml index c83de48159..c171c538d5 100644 --- a/.github/workflows/documentation-and-style.yml +++ b/.github/workflows/documentation-and-style.yml @@ -14,7 +14,7 @@ jobs: - name: Check white space (non-blocking) run: | - ./.testing/trailer.py -e TEOS10 -l 120 src config_src | tee style_errors + ./.testing/trailer.py -e TEOS10 -l 120 src config_src 2>&1 | tee style_errors continue-on-error: true - name: Install packages used when generating documentation @@ -35,4 +35,5 @@ jobs: run: | grep "warning:" docs/_build/doxygen_warn_nortd_log.txt | grep -v "as part of a" | tee doxy_errors cat style_errors doxy_errors > all_errors + cat all_errors test ! -s all_errors diff --git a/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 b/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 index 67f6643a42..4d2d9dec9b 100644 --- a/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 +++ b/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 @@ -21,7 +21,7 @@ module MOM_surface_forcing_gfdl use MOM_forcing_type, only : allocate_mech_forcing, deallocate_mech_forcing use MOM_get_input, only : Get_MOM_Input, directories use MOM_grid, only : ocean_grid_type -use MOM_interpolate, only : init_external_field, time_interp_extern +use MOM_interpolate, only : init_external_field, time_interp_external use MOM_interpolate, only : time_interp_external_init use MOM_io, only : slasher, write_version_number, MOM_read_data use MOM_restart, only : register_restart_field, restart_init, MOM_restart_CS @@ -349,7 +349,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! Salinity restoring logic if (CS%restore_salt) then - call time_interp_extern(CS%id_srestore, Time, data_restore) + call time_interp_external(CS%id_srestore, Time, data_restore) ! 
open_ocn_mask indicates where to restore salinity (1 means restore, 0 does not) open_ocn_mask(:,:) = 1.0 if (CS%mask_srestore_under_ice) then ! Do not restore under sea-ice @@ -406,7 +406,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! SST restoring logic if (CS%restore_temp) then - call time_interp_extern(CS%id_trestore, Time, data_restore) + call time_interp_external(CS%id_trestore, Time, data_restore) do j=js,je ; do i=is,ie delta_sst = data_restore(i,j)- sfc_state%SST(i,j) delta_sst = sign(1.0,delta_sst)*min(abs(delta_sst),CS%max_delta_trestore) @@ -1557,7 +1557,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) if (CS%restore_salt) then salt_file = trim(CS%inputdir) // trim(CS%salt_restore_file) - CS%id_srestore = init_external_field(salt_file, CS%salt_restore_var_name, domain=G%Domain%mpp_domain) + CS%id_srestore = init_external_field(salt_file, CS%salt_restore_var_name, MOM_domain=G%Domain) call safe_alloc_ptr(CS%srestore_mask,isd,ied,jsd,jed); CS%srestore_mask(:,:) = 1.0 if (CS%mask_srestore) then ! read a 2-d file containing a mask for restoring fluxes flnam = trim(CS%inputdir) // 'salt_restore_mask.nc' @@ -1567,7 +1567,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) if (CS%restore_temp) then temp_file = trim(CS%inputdir) // trim(CS%temp_restore_file) - CS%id_trestore = init_external_field(temp_file, CS%temp_restore_var_name, domain=G%Domain%mpp_domain) + CS%id_trestore = init_external_field(temp_file, CS%temp_restore_var_name, MOM_domain=G%Domain) call safe_alloc_ptr(CS%trestore_mask,isd,ied,jsd,jed); CS%trestore_mask(:,:) = 1.0 if (CS%mask_trestore) then ! 
read a 2-d file containing a mask for restoring fluxes flnam = trim(CS%inputdir) // 'temp_restore_mask.nc' diff --git a/config_src/nuopc_driver/mom_cap.F90 b/config_src/nuopc_driver/mom_cap.F90 index fc6bb5035e..f0ce8720bb 100644 --- a/config_src/nuopc_driver/mom_cap.F90 +++ b/config_src/nuopc_driver/mom_cap.F90 @@ -25,7 +25,7 @@ module MOM_cap_mod use time_manager_mod, only: fms_get_calendar_type => get_calendar_type use MOM_domains, only: MOM_infra_init, num_pes, root_pe, pe_here use MOM_file_parser, only: get_param, log_version, param_file_type, close_param_file -use MOM_get_input, only: Get_MOM_Input, directories +use MOM_get_input, only: get_MOM_input, directories use MOM_domains, only: pass_var use MOM_error_handler, only: MOM_error, FATAL, is_root_pe use MOM_ocean_model_nuopc, only: ice_ocean_boundary_type @@ -35,7 +35,8 @@ module MOM_cap_mod use MOM_ocean_model_nuopc, only: ocean_model_init, update_ocean_model, ocean_model_end use MOM_ocean_model_nuopc, only: get_ocean_grid, get_eps_omesh use MOM_cap_time, only: AlarmInit -use MOM_cap_methods, only: mom_import, mom_export, mom_set_geomtype +use MOM_cap_methods, only: mom_import, mom_export, mom_set_geomtype, state_diagnose +use MOM_cap_methods, only: ChkErr #ifdef CESMCOUPLED use shr_file_mod, only: shr_file_setLogUnit, shr_file_getLogUnit #endif @@ -123,7 +124,7 @@ module MOM_cap_mod integer :: fldsFrOcn_num = 0 type (fld_list_type) :: fldsFrOcn(fldsMax) -integer :: debug = 0 +integer :: dbug = 0 integer :: import_slice = 1 integer :: export_slice = 1 character(len=256) :: tmpstr @@ -133,11 +134,12 @@ module MOM_cap_mod integer :: logunit !< stdout logging unit number logical :: profile_memory = .true. logical :: grid_attach_area = .false. +logical :: use_coldstart = .true. 
character(len=128) :: scalar_field_name = '' integer :: scalar_field_count = 0 integer :: scalar_field_idx_grid_nx = 0 integer :: scalar_field_idx_grid_ny = 0 -character(len=*),parameter :: u_file_u = & +character(len=*),parameter :: u_FILE_u = & __FILE__ #ifdef CESMCOUPLED @@ -147,6 +149,7 @@ module MOM_cap_mod logical :: cesm_coupled = .false. type(ESMF_GeomType_Flag) :: geomtype = ESMF_GEOMTYPE_GRID #endif +character(len=8) :: restart_mode = 'alarms' contains @@ -168,32 +171,20 @@ subroutine SetServices(gcomp, rc) ! the NUOPC model component will register the generic methods call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! switching to IPD versions call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & userRoutine=InitializeP0, phase=0, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! set entry point for methods that require specific implementation call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & phaseLabelList=(/"IPDv03p1"/), userRoutine=InitializeAdvertise, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & phaseLabelList=(/"IPDv03p3"/), userRoutine=InitializeRealize, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !------------------ ! 
attach specializing method(s) @@ -201,36 +192,21 @@ subroutine SetServices(gcomp, rc) call NUOPC_CompSpecialize(gcomp, specLabel=model_label_DataInitialize, & specRoutine=DataInitialize, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, & specRoutine=ModelAdvance, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, & specRoutine=ModelSetRunClock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, & specRoutine=ocean_model_finalize, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine SetServices @@ -263,95 +239,62 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) ! Switch to IPDv03 by filtering all other phaseMap entries call NUOPC_CompFilterPhaseMap(gcomp, ESMF_METHOD_INITIALIZE, & acceptStringList=(/"IPDv03p"/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return write_diagnostics = .false. 
call NUOPC_CompAttributeGet(gcomp, name="DumpFields", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) write_diagnostics=(trim(value)=="true") write(logmsg,*) write_diagnostics - call ESMF_LogWrite('MOM_cap:DumpFields = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:DumpFields = '//trim(logmsg), ESMF_LOGMSG_INFO) overwrite_timeslice = .false. call NUOPC_CompAttributeGet(gcomp, name="OverwriteSlice", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) overwrite_timeslice=(trim(value)=="true") write(logmsg,*) overwrite_timeslice - call ESMF_LogWrite('MOM_cap:OverwriteSlice = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:OverwriteSlice = '//trim(logmsg), ESMF_LOGMSG_INFO) profile_memory = .false. call NUOPC_CompAttributeGet(gcomp, name="ProfileMemory", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. 
isSet) profile_memory=(trim(value)=="true") write(logmsg,*) profile_memory - call ESMF_LogWrite('MOM_cap:ProfileMemory = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ProfileMemory = '//trim(logmsg), ESMF_LOGMSG_INFO) grid_attach_area = .false. call NUOPC_CompAttributeGet(gcomp, name="GridAttachArea", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) grid_attach_area=(trim(value)=="true") write(logmsg,*) grid_attach_area - call ESMF_LogWrite('MOM_cap:GridAttachArea = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:GridAttachArea = '//trim(logmsg), ESMF_LOGMSG_INFO) + + call NUOPC_CompAttributeGet(gcomp, name='dbug_flag', value=value, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(value,*) dbug + end if + write(logmsg,'(i6)') dbug + call ESMF_LogWrite('MOM_cap:dbug = '//trim(logmsg), ESMF_LOGMSG_INFO) scalar_field_name = "" call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. 
isSet) then scalar_field_name = trim(value) - call ESMF_LogWrite('MOM_cap:ScalarFieldName = '//trim(scalar_field_name), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ScalarFieldName = '//trim(scalar_field_name), ESMF_LOGMSG_INFO) endif scalar_field_count = 0 call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(value, *, iostat=iostat) scalar_field_count if (iostat /= 0) then @@ -361,20 +304,13 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) return endif write(logmsg,*) scalar_field_count - call ESMF_LogWrite('MOM_cap:ScalarFieldCount = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ScalarFieldCount = '//trim(logmsg), ESMF_LOGMSG_INFO) endif scalar_field_idx_grid_nx = 0 call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. 
isSet) then read(value, *, iostat=iostat) scalar_field_idx_grid_nx if (iostat /= 0) then @@ -384,20 +320,13 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) return endif write(logmsg,*) scalar_field_idx_grid_nx - call ESMF_LogWrite('MOM_cap:ScalarFieldIdxGridNX = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ScalarFieldIdxGridNX = '//trim(logmsg), ESMF_LOGMSG_INFO) endif scalar_field_idx_grid_ny = 0 call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=value, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(value, *, iostat=iostat) scalar_field_idx_grid_ny if (iostat /= 0) then @@ -407,13 +336,17 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) return endif write(logmsg,*) scalar_field_idx_grid_ny - call ESMF_LogWrite('MOM_cap:ScalarFieldIdxGridNY = '//trim(logmsg), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:ScalarFieldIdxGridNY = '//trim(logmsg), ESMF_LOGMSG_INFO) endif + use_coldstart = .true. + call NUOPC_CompAttributeGet(gcomp, name="use_coldstart", value=value, & + isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) use_coldstart=(trim(value)=="true") + write(logmsg,*) use_coldstart + call ESMF_LogWrite('MOM_cap:use_coldstart = '//trim(logmsg), ESMF_LOGMSG_INFO) + end subroutine !> Called by NUOPC to advertise import and export fields. 
"Advertise" @@ -442,6 +375,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) type(ice_ocean_boundary_type), pointer :: Ice_ocean_boundary => NULL() type(ocean_internalstate_wrapper) :: ocean_internalstate type(ocean_grid_type), pointer :: ocean_grid => NULL() + type(directories) :: dirs type(time_type) :: Run_len !< length of experiment type(time_type) :: time0 !< Start time of coupled model's calendar. type(time_type) :: time_start !< The time at which to initialize the ocean model @@ -472,11 +406,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' enter', ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(subname//' enter', ESMF_LOGMSG_INFO) allocate(Ice_ocean_boundary) !allocate(ocean_state) ! ocean_model_init allocate this pointer @@ -487,35 +417,21 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ocean_internalstate%ptr%ocean_state_type_ptr => ocean_state call ESMF_VMGetCurrent(vm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_VMGet(VM, mpiCommunicator=mpi_comm_mom, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(CLOCK, currTIME=MyTime, TimeStep=TINT, RC=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet (MyTime, YY=YEAR, MM=MONTH, DD=DAY, H=HOUR, M=MINUTE, S=SECOND, RC=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return CALL ESMF_TimeIntervalGet(TINT, S=DT_OCEAN, RC=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + !TODO: next two lines not present in NCAR call fms_init(mpi_comm_mom) call constants_init call field_manager_init @@ -524,10 +440,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) if (cesm_coupled) then call NUOPC_CompAttributeGet(gcomp, name="calendar", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) calendar select case (trim(calendar)) @@ -561,16 +474,10 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ! get start/reference time call ESMF_ClockGet(CLOCK, refTime=MyTime, RC=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet (MyTime, YY=YEAR, MM=MONTH, DD=DAY, H=HOUR, M=MINUTE, S=SECOND, RC=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return time0 = set_date (YEAR,MONTH,DAY,HOUR,MINUTE,SECOND) @@ -586,28 +493,16 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) if (is_root_pe()) then call NUOPC_CompAttributeGet(gcomp, name="diro", & isPresent=isPresentDiro, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompAttributeGet(gcomp, name="logfile", & isPresent=isPresentLogfile, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresentDiro .and. isPresentLogfile) then call NUOPC_CompAttributeGet(gcomp, name="diro", value=diro, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_CompAttributeGet(gcomp, name="logfile", value=logfile, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - open(newunit=logunit,file=trim(diro)//"/"//trim(logfile)) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + open(newunit=logunit,file=trim(diro)//"/"//trim(logfile)) else logunit = output_unit endif @@ -618,19 +513,12 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) starttype = "" call NUOPC_CompAttributeGet(gcomp, name='start_type', value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. 
isSet) then read(cvalue,*) starttype else call ESMF_LogWrite('MOM_cap:start_type unset - using input.nml for restart option', & - ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + ESMF_LOGMSG_INFO) endif runtype = "" @@ -648,26 +536,27 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) endif if (len_trim(runtype) > 0) then - call ESMF_LogWrite('MOM_cap:startup = '//trim(runtype), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite('MOM_cap:startup = '//trim(runtype), ESMF_LOGMSG_INFO) endif restartfile = ""; restartfiles = "" if (runtype == "initial") then - - restartfiles = "n" + if (cesm_coupled) then + restartfiles = "n" + else + call get_MOM_input(dirs=dirs) + restartfiles = dirs%input_filename(1:1) + endif + call ESMF_LogWrite('MOM_cap:restartfile = '//trim(restartfiles), ESMF_LOGMSG_INFO) else if (runtype == "continue") then ! hybrid or branch or continuos runs if (cesm_coupled) then call ESMF_LogWrite('MOM_cap: restart requested, using rpointer.ocn', ESMF_LOGMSG_WARNING) call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_VMGet(vm, localPet=localPet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (localPet == 0) then ! this hard coded for rpointer.ocn right now @@ -698,7 +587,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) endif ! 
broadcast attribute set on master task to all tasks call ESMF_VMBroadcast(vm, restartfiles, count=len(restartfiles), rootPet=0, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return else call ESMF_LogWrite('MOM_cap: restart requested, use input.nml', ESMF_LOGMSG_WARNING) endif @@ -754,12 +643,23 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) Ice_ocean_boundary%lrunoff = 0.0 Ice_ocean_boundary%frunoff = 0.0 + if (ocean_state%use_waves) then + Ice_ocean_boundary%num_stk_bands=ocean_state%Waves%NumBands + allocate ( Ice_ocean_boundary% ustk0 (isc:iec,jsc:jec), & + Ice_ocean_boundary% vstk0 (isc:iec,jsc:jec), & + Ice_ocean_boundary% ustkb (isc:iec,jsc:jec,Ice_ocean_boundary%num_stk_bands), & + Ice_ocean_boundary% vstkb (isc:iec,jsc:jec,Ice_ocean_boundary%num_stk_bands), & + Ice_ocean_boundary%stk_wavenumbers (Ice_ocean_boundary%num_stk_bands)) + Ice_ocean_boundary%ustk0 = 0.0 + Ice_ocean_boundary%vstk0 = 0.0 + Ice_ocean_boundary%stk_wavenumbers = ocean_state%Waves%WaveNum_Cen + Ice_ocean_boundary%ustkb = 0.0 + Ice_ocean_boundary%vstkb = 0.0 + endif + ocean_internalstate%ptr%ocean_state_type_ptr => ocean_state call ESMF_GridCompSetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (len_trim(scalar_field_name) > 0) then call fld_list_add(fldsToOcn_num, fldsToOcn, trim(scalar_field_name), "will_provide") @@ -800,6 +700,17 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) !These are not currently used and changing requires a nuopc dictionary change !call fld_list_add(fldsToOcn_num, fldsToOcn, "mean_runoff_heat_flx" , "will provide") !call fld_list_add(fldsToOcn_num, fldsToOcn, "mean_calving_heat_flx" , "will provide") + if (ocean_state%use_waves) then + if (Ice_ocean_boundary%num_stk_bands > 3) then + call MOM_error(FATAL, "Number of Stokes Bands > 3, NUOPC cap not set up for this") + endif + call fld_list_add(fldsToOcn_num, fldsToOcn, "eastward_partitioned_stokes_drift_1" , "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "northward_partitioned_stokes_drift_1", "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "eastward_partitioned_stokes_drift_2" , "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "northward_partitioned_stokes_drift_2", "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "eastward_partitioned_stokes_drift_3" , "will provide") + call fld_list_add(fldsToOcn_num, fldsToOcn, "northward_partitioned_stokes_drift_3", "will provide") + endif !--------- export fields ------------- call fld_list_add(fldsFrOcn_num, fldsFrOcn, "ocean_mask" , "will provide") @@ -814,18 +725,12 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) do n = 1,fldsToOcn_num call NUOPC_Advertise(importState, standardName=fldsToOcn(n)%stdname, name=fldsToOcn(n)%shortname, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return enddo do n = 1,fldsFrOcn_num call NUOPC_Advertise(exportState, standardName=fldsFrOcn(n)%stdname, name=fldsFrOcn(n)%shortname, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return enddo end subroutine InitializeAdvertise @@ -909,10 +814,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) !---------------------------------------------------------------------------- call ESMF_GridCompGetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return Ice_ocean_boundary => ocean_internalstate%ptr%ice_ocean_boundary_type_ptr ocean_public => ocean_internalstate%ptr%ocean_public_type_ptr @@ -923,16 +825,10 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) !---------------------------------------------------------------------------- call ESMF_VMGetCurrent(vm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_VMGet(vm, petCount=npet, mpiCommunicator=mpicom, localPet=localPet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !--------------------------------- ! 
global mom grid size @@ -940,11 +836,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) call mpp_get_global_domain(ocean_public%domain, xsize=nxg, ysize=nyg) write(tmpstr,'(a,2i6)') subname//' nxg,nyg = ',nxg,nyg - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) !--------------------------------- ! number of tiles per PET, assumed to be 1, and number of pes (tiles) total @@ -953,19 +845,11 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ntiles=mpp_get_ntile_count(ocean_public%domain) ! this is tiles on this pe if (ntiles /= 1) then rc = ESMF_FAILURE - call ESMF_LogWrite(subname//' ntiles must be 1', ESMF_LOGMSG_ERROR, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(subname//' ntiles must be 1', ESMF_LOGMSG_ERROR) endif ntiles=mpp_get_domain_npes(ocean_public%domain) write(tmpstr,'(a,1i6)') subname//' ntiles = ',ntiles - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) !--------------------------------- ! 
get start and end indices of each tile and their PET @@ -974,14 +858,10 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) allocate(xb(ntiles),xe(ntiles),yb(ntiles),ye(ntiles),pe(ntiles)) call mpp_get_compute_domains(ocean_public%domain, xbegin=xb, xend=xe, ybegin=yb, yend=ye) call mpp_get_pelist(ocean_public%domain, pe) - if (debug > 0) then + if (dbug > 1) then do n = 1,ntiles write(tmpstr,'(a,6i6)') subname//' tiles ',n,pe(n),xb(n),xe(n),yb(n),ye(n) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) enddo endif @@ -1014,23 +894,14 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) enddo DistGrid = ESMF_DistGridCreate(arbSeqIndexList=gindex, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! read in the mesh call NUOPC_CompAttributeGet(gcomp, name='mesh_ocn', value=cvalue, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return EMeshTemp = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (localPet == 0) then write(logunit,*)'mesh file for mom6 domain is ',trim(cvalue) @@ -1038,17 +909,11 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ! 
recreate the mesh using the above distGrid EMesh = ESMF_MeshCreate(EMeshTemp, elementDistgrid=Distgrid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Check for consistency of lat, lon and mask between mesh and mom6 grid call ESMF_MeshGet(Emesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return allocate(ownedElemCoords(spatialDim*numOwnedElements)) allocate(lonMesh(numOwnedElements), lon(numOwnedElements)) @@ -1056,25 +921,16 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) allocate(maskMesh(numOwnedElements), mask(numOwnedElements)) call ESMF_MeshGet(Emesh, ownedElemCoords=ownedElemCoords, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return do n = 1,numOwnedElements lonMesh(n) = ownedElemCoords(2*n-1) latMesh(n) = ownedElemCoords(2*n) end do elemMaskArray = ESMF_ArrayCreate(Distgrid, maskMesh, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_MeshGet(Emesh, elemMaskArray=elemMaskArray, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call mpp_get_compute_domain(ocean_public%domain, isc, iec, jsc, jec) n = 0 @@ -1121,16 +977,10 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) deallocate(maskMesh, mask) ! 
realize the import and export fields using the mesh call MOM_RealizeFields(importState, fldsToOcn_num, fldsToOcn, "Ocn import", mesh=Emesh, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call MOM_RealizeFields(exportState, fldsFrOcn_num, fldsFrOcn, "Ocn export", mesh=Emesh, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return else if (geomtype == ESMF_GEOMTYPE_GRID) then @@ -1152,19 +1002,16 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) deBlockList(2,2,n) = ye(n) petMap(n) = pe(n) ! write(tmpstr,'(a,3i8)') subname//' iglo = ',n,deBlockList(1,1,n),deBlockList(1,2,n) - ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) ! write(tmpstr,'(a,3i8)') subname//' jglo = ',n,deBlockList(2,1,n),deBlockList(2,2,n) - ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) ! write(tmpstr,'(a,2i8)') subname//' pe = ',n,petMap(n) - ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) !--- assume a tile with starting index of 1 has an equivalent wraparound tile on the other side enddo delayout = ESMF_DELayoutCreate(petMap, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! rsd this assumes tripole grid, but sometimes in CESM a bipole ! 
grid is used -- need to introduce conditional logic here @@ -1175,18 +1022,12 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) call ESMF_DistGridConnectionSet(connectionList(1), tileIndexA=1, & tileIndexB=1, positionVector=(/nxg+1, 2*nyg+1/), & orientationVector=(/-1, -2/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! periodic boundary condition along first dimension call ESMF_DistGridConnectionSet(connectionList(2), tileIndexA=1, & tileIndexB=1, positionVector=(/nxg, 0/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return distgrid = ESMF_DistGridCreate(minIndex=(/1,1/), maxIndex=(/nxg,nyg/), & ! indexflag = ESMF_INDEX_DELOCAL, & @@ -1195,10 +1036,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) delayout=delayout, & connectionList=connectionList, & rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(xb,xe,yb,ye,pe) deallocate(connectionList) @@ -1207,32 +1045,18 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) deallocate(petMap) call ESMF_DistGridGet(distgrid=distgrid, localDE=0, elementCount=cnt, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return allocate(indexList(cnt)) write(tmpstr,'(a,i8)') subname//' distgrid cnt= ',cnt - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) call ESMF_DistGridGet(distgrid=distgrid, localDE=0, 
seqIndexList=indexList, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return write(tmpstr,'(a,4i8)') subname//' distgrid list= ',& indexList(1),indexList(cnt),minval(indexList), maxval(indexList) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) deallocate(IndexList) @@ -1242,91 +1066,55 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) gridEdgeLWidth=(/0,0/), gridEdgeUWidth=(/0,1/), & coordSys = ESMF_COORDSYS_SPH_DEG, & rc = rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CORNER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridAddItem(gridIn, itemFlag=ESMF_GRIDITEM_MASK, itemTypeKind=ESMF_TYPEKIND_I4, & staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Attach area to the Grid optionally. By default the cell areas are computed. 
if(grid_attach_area) then call ESMF_GridAddItem(gridIn, itemFlag=ESMF_GRIDITEM_AREA, itemTypeKind=ESMF_TYPEKIND_R8, & staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif call ESMF_GridGetCoord(gridIn, coordDim=1, & staggerloc=ESMF_STAGGERLOC_CENTER, & farrayPtr=dataPtr_xcen, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridGetCoord(gridIn, coordDim=2, & staggerloc=ESMF_STAGGERLOC_CENTER, & farrayPtr=dataPtr_ycen, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridGetCoord(gridIn, coordDim=1, & staggerloc=ESMF_STAGGERLOC_CORNER, & farrayPtr=dataPtr_xcor, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridGetCoord(gridIn, coordDim=2, & staggerloc=ESMF_STAGGERLOC_CORNER, & farrayPtr=dataPtr_ycor, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridGetItem(gridIn, itemflag=ESMF_GRIDITEM_MASK, & staggerloc=ESMF_STAGGERLOC_CENTER, & farrayPtr=dataPtr_mask, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if(grid_attach_area) then call ESMF_GridGetItem(gridIn, itemflag=ESMF_GRIDITEM_AREA, & staggerloc=ESMF_STAGGERLOC_CENTER, & farrayPtr=dataPtr_area, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if 
(ChkErr(rc,__LINE__,u_FILE_u)) return endif ! load up area, mask, center and corner values @@ -1349,13 +1137,13 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ubnd4 = ubound(dataPtr_xcor,2) write(tmpstr,*) subname//' iscjsc = ',isc,iec,jsc,jec - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' lbub12 = ',lbnd1,ubnd1,lbnd2,ubnd2 - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' lbub34 = ',lbnd3,ubnd3,lbnd4,ubnd4 - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) if (iec-isc /= ubnd1-lbnd1 .or. jec-jsc /= ubnd2-lbnd2) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & @@ -1394,38 +1182,32 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) enddo write(tmpstr,*) subname//' mask = ',minval(dataPtr_mask),maxval(dataPtr_mask) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) if(grid_attach_area) then write(tmpstr,*) subname//' area = ',minval(dataPtr_area),maxval(dataPtr_area) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) endif write(tmpstr,*) subname//' xcen = ',minval(dataPtr_xcen),maxval(dataPtr_xcen) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' ycen = ',minval(dataPtr_ycen),maxval(dataPtr_ycen) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' xcor = ',minval(dataPtr_xcor),maxval(dataPtr_xcor) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) write(tmpstr,*) subname//' ycor = 
',minval(dataPtr_ycor),maxval(dataPtr_ycor) - call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) gridOut = gridIn ! for now out same as in call MOM_RealizeFields(importState, fldsToOcn_num, fldsToOcn, "Ocn import", grid=gridIn, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call MOM_RealizeFields(exportState, fldsFrOcn_num, fldsFrOcn, "Ocn export", grid=gridOut, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif @@ -1436,18 +1218,11 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) if (len_trim(scalar_field_name) > 0) then call State_SetScalar(real(nxg,ESMF_KIND_R8),scalar_field_idx_grid_nx, exportState, localPet, & scalar_field_name, scalar_field_count, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call State_SetScalar(real(nyg,ESMF_KIND_R8),scalar_field_idx_grid_ny, exportState, localPet, & scalar_field_name, scalar_field_count, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return - + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif !--------------------------------- @@ -1461,10 +1236,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) !call NUOPC_Write(exportState, fileNamePrefix='post_realize_field_ocn_export_', & ! timeslice=1, relaxedFlag=.true., rc=rc) - !if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - ! line=__LINE__, & - ! file=__FILE__)) & - ! return ! bail out + !if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine InitializeRealize @@ -1498,21 +1270,15 @@ subroutine DataInitialize(gcomp, rc) ! 
query the Component for its clock, importState and exportState call ESMF_GridCompGet(gcomp, clock=clock, importState=importState, exportState=exportState, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(currTime, timestring=timestr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridCompGetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return Ice_ocean_boundary => ocean_internalstate%ptr%ice_ocean_boundary_type_ptr ocean_public => ocean_internalstate%ptr%ocean_public_type_ptr @@ -1520,62 +1286,44 @@ subroutine DataInitialize(gcomp, rc) call get_ocean_grid(ocean_state, ocean_grid) call mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_StateGet(exportState, itemCount=fieldCount, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return allocate(fieldNameList(fieldCount)) call ESMF_StateGet(exportState, itemNameList=fieldNameList, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return do n=1, fieldCount call ESMF_StateGet(exportState, itemName=fieldNameList(n), field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call NUOPC_SetAttribute(field, name="Updated", value="true", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return enddo deallocate(fieldNameList) ! check whether all Fields in the exportState are "Updated" if (NUOPC_IsUpdated(exportState)) then call NUOPC_CompAttributeSet(gcomp, name="InitializeDataComplete", value="true", rc=rc) - call ESMF_LogWrite("MOM6 - Initialize-Data-Dependency SATISFIED!!!", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_LogWrite("MOM6 - Initialize-Data-Dependency SATISFIED!!!", ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif if(write_diagnostics) then do n = 1,fldsFrOcn_num fldname = fldsFrOcn(n)%shortname call ESMF_StateGet(exportState, itemName=trim(fldname), itemType=itemType, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (itemType /= ESMF_STATEITEM_NOTFOUND) then call ESMF_StateGet(exportState, itemName=trim(fldname), field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_FieldWrite(field, fileName='field_init_ocn_export_'//trim(timestr)//'.nc', & timeslice=1, overwrite=overwrite_timeslice, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return 
endif enddo endif @@ -1639,43 +1387,23 @@ subroutine ModelAdvance(gcomp, rc) ! query the Component for its clock, importState and exportState call ESMF_GridCompGet(gcomp, clock=clock, importState=importState, & exportState=exportState, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! HERE THE MODEL ADVANCES: currTime -> currTime + timeStep call ESMF_ClockPrint(clock, options="currTime", & preString="------>Advancing OCN from: ", unit=msgString, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - call ESMF_LogWrite(subname//trim(msgString), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_LogWrite(subname//trim(msgString), ESMF_LOGMSG_INFO) call ESMF_ClockGet(clock, startTime=startTime, currTime=currTime, & timeStep=timeStep, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimePrint(currTime + timeStep, & preString="--------------------------------> to: ", unit=msgString, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO) call ESMF_TimeGet(currTime, timestring=import_timestr, rc=rc) call ESMF_TimeGet(currTime+timestep, timestring=export_timestr, rc=rc) @@ -1687,16 +1415,12 @@ subroutine ModelAdvance(gcomp, rc) ! Apply ocean lag for startup runs: !--------------- - if (cesm_coupled) then + if (cesm_coupled .or. (.not.use_coldstart)) then if (trim(runtype) == "initial") then ! Do not call MOM6 timestepping routine if the first cpl tstep of a startup run if (currTime == startTime) then - call ESMF_LogWrite("MOM6 - Skipping the first coupling timestep", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_LogWrite("MOM6 - Skipping the first coupling timestep", ESMF_LOGMSG_INFO) do_advance = .false. else do_advance = .true. @@ -1705,18 +1429,10 @@ subroutine ModelAdvance(gcomp, rc) if (do_advance) then ! If the second cpl tstep of a startup run, step back a cpl tstep and advance for two cpl tsteps if (currTime == startTime + timeStep) then - call ESMF_LogWrite("MOM6 - Stepping back one coupling timestep", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + call ESMF_LogWrite("MOM6 - Stepping back one coupling timestep", ESMF_LOGMSG_INFO) Time = esmf2fms_time(currTime-timeStep) ! i.e., startTime - call ESMF_LogWrite("MOM6 - doubling the coupling timestep", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + call ESMF_LogWrite("MOM6 - doubling the coupling timestep", ESMF_LOGMSG_INFO) Time_step_coupled = 2 * esmf2fms_time(timeStep) endif end if @@ -1727,10 +1443,7 @@ subroutine ModelAdvance(gcomp, rc) if (do_advance) then call ESMF_GridCompGetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return Ice_ocean_boundary => ocean_internalstate%ptr%ice_ocean_boundary_type_ptr ocean_public => ocean_internalstate%ptr%ocean_public_type_ptr @@ -1741,22 +1454,27 @@ subroutine ModelAdvance(gcomp, rc) !--------------- if (write_diagnostics) then - do n = 1,fldsToOcn_num - fldname = fldsToOcn(n)%shortname - call ESMF_StateGet(importState, itemName=trim(fldname), itemType=itemType, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - if (itemType /= ESMF_STATEITEM_NOTFOUND) then - call ESMF_StateGet(importState, itemName=trim(fldname), field=lfield, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - - call ESMF_FieldWrite(lfield, fileName='field_ocn_import_'//trim(import_timestr)//'.nc', & - timeslice=1, overwrite=overwrite_timeslice, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - endif - enddo + do n = 1,fldsToOcn_num + fldname = fldsToOcn(n)%shortname + call ESMF_StateGet(importState, itemName=trim(fldname), itemType=itemType, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (itemType /= ESMF_STATEITEM_NOTFOUND) then + call ESMF_StateGet(importState, itemName=trim(fldname), field=lfield, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + call ESMF_FieldWrite(lfield, fileName='field_ocn_import_'//trim(import_timestr)//'.nc', & + timeslice=1, overwrite=overwrite_timeslice, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return 
+ endif + enddo endif + if (dbug > 0) then + call state_diagnose(importState,subname//':IS ',rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + end if + !--------------- ! Get ocean grid !--------------- @@ -1768,10 +1486,7 @@ subroutine ModelAdvance(gcomp, rc) !--------------- call mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !--------------- ! Update MOM6 @@ -1786,11 +1501,12 @@ subroutine ModelAdvance(gcomp, rc) !--------------- call mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (dbug > 0) then + call state_diagnose(exportState,subname//':ES ',rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + end if endif !--------------- @@ -1798,61 +1514,42 @@ subroutine ModelAdvance(gcomp, rc) !--------------- call ESMF_ClockGetAlarm(clock, alarmname='stop_alarm', alarm=stop_alarm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !--------------- ! If restart alarm exists and is ringing - write restart file !--------------- - call ESMF_ClockGetAlarm(clock, alarmname='restart_alarm', alarm=restart_alarm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - - if (ESMF_AlarmIsRinging(restart_alarm, rc=rc)) then - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - - ! 
turn off the alarm - call ESMF_AlarmRingerOff(restart_alarm, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (restart_mode == 'alarms') then + call ESMF_ClockGetAlarm(clock, alarmname='restart_alarm', alarm=restart_alarm, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (ESMF_AlarmIsRinging(restart_alarm, rc=rc)) then + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! turn off the alarm + call ESMF_AlarmRingerOff(restart_alarm, rc=rc ) + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! determine restart filename - call ESMF_ClockGetNextTime(clock, MyTime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - call ESMF_TimeGet (MyTime, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - if (cesm_coupled) then + call ESMF_ClockGetNextTime(clock, MyTime, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_TimeGet (MyTime, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc ) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (cesm_coupled) then call NUOPC_CompAttributeGet(gcomp, name='case_name', value=casename, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_VMGet(vm, localPet=localPet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return write(restartname,'(A,".mom6.r.",I4.4,"-",I2.2,"-",I2.2,"-",I5.5)') & trim(casename), year, month, day, 
seconds - - call ESMF_LogWrite("MOM_cap: Writing restart : "//trim(restartname), ESMF_LOGMSG_INFO, rc=rc) - + call ESMF_LogWrite("MOM_cap: Writing restart : "//trim(restartname), ESMF_LOGMSG_INFO) ! write restart file(s) call ocean_model_restart(ocean_state, restartname=restartname, num_rest_files=num_rest_files) - if (localPet == 0) then ! Write name of restart file in the rpointer file - this is currently hard-coded for the ocean open(newunit=writeunit, file='rpointer.ocn', form='formatted', status='unknown', iostat=iostat) @@ -1874,28 +1571,27 @@ subroutine ModelAdvance(gcomp, rc) write(writeunit,'(a)') trim(restartname) // trim(suffix) // '.nc' enddo endif - close(writeunit) endif - else - ! write the final restart without a timestamp - if (ESMF_AlarmIsRinging(stop_alarm, rc=rc)) then - write(restartname,'(A)')"MOM.res" - else - write(restartname,'(A,I4.4,"-",I2.2,"-",I2.2,"-",I2.2,"-",I2.2,"-",I2.2)') & - "MOM.res.", year, month, day, hour, minute, seconds - endif - - call ESMF_LogWrite("MOM_cap: Writing restart : "//trim(restartname), ESMF_LOGMSG_INFO, rc=rc) + else ! not cesm_coupled + ! write the final restart without a timestamp + if (ESMF_AlarmIsRinging(stop_alarm, rc=rc)) then + write(restartname,'(A)')"MOM.res" + else + write(restartname,'(A,I4.4,"-",I2.2,"-",I2.2,"-",I2.2,"-",I2.2,"-",I2.2)') & + "MOM.res.", year, month, day, hour, minute, seconds + endif + call ESMF_LogWrite("MOM_cap: Writing restart : "//trim(restartname), ESMF_LOGMSG_INFO) - ! write restart file(s) - call ocean_model_restart(ocean_state, restartname=restartname) - end if + ! write restart file(s) + call ocean_model_restart(ocean_state, restartname=restartname) + endif - if (is_root_pe()) then - write(logunit,*) subname//' writing restart file ',trim(restartname) - endif - endif + if (is_root_pe()) then + write(logunit,*) subname//' writing restart file ',trim(restartname) + endif + endif + end if ! restart_mode !--------------- ! 
Write diagnostics @@ -1905,15 +1601,15 @@ subroutine ModelAdvance(gcomp, rc) do n = 1,fldsFrOcn_num fldname = fldsFrOcn(n)%shortname call ESMF_StateGet(exportState, itemName=trim(fldname), itemType=itemType, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (itemType /= ESMF_STATEITEM_NOTFOUND) then call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_FieldWrite(lfield, fileName='field_ocn_export_'//trim(export_timestr)//'.nc', & timeslice=1, overwrite=overwrite_timeslice, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif enddo endif @@ -1949,23 +1645,14 @@ subroutine ModelSetRunClock(gcomp, rc) ! query the Component for its clock, importState and exportState call NUOPC_ModelGet(gcomp, driverClock=dclock, modelClock=mclock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(dclock, currTime=dcurrtime, timeStep=dtimestep, & stopTime=dstoptime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(mclock, currTime=mcurrtime, timeStep=mtimestep, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !-------------------------------- ! 
check that the current time in the model and driver are the same @@ -1973,16 +1660,10 @@ subroutine ModelSetRunClock(gcomp, rc) if (mcurrtime /= dcurrtime) then call ESMF_TimeGet(dcurrtime, timeString=dtimestring, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(mcurrtime, timeString=mtimestring, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_LogSetError(ESMF_RC_VAL_WRONG, & msg=subname//": ERROR in time consistency: "//trim(dtimestring)//" != "//trim(mtimestring), & @@ -1997,10 +1678,7 @@ subroutine ModelSetRunClock(gcomp, rc) mstoptime = mcurrtime + dtimestep call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (first_time) then !-------------------------------- @@ -2014,18 +1692,18 @@ subroutine ModelSetRunClock(gcomp, rc) if (cesm_coupled) then call NUOPC_CompAttributeGet(gcomp, name="restart_option", value=restart_option, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! If restart_option is set then must also have set either restart_n or restart_ymd call NUOPC_CompAttributeGet(gcomp, name="restart_n", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. 
isSet) then read(cvalue,*) restart_n endif call NUOPC_CompAttributeGet(gcomp, name="restart_ymd", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) restart_ymd endif @@ -2040,66 +1718,64 @@ subroutine ModelSetRunClock(gcomp, rc) else call NUOPC_CompAttributeGet(gcomp, name="restart_n", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return - ! If restart_option is set then must also have set either restart_n or restart_ymd + ! If restart_n is set and non-zero, then restart_option must be available from config if (isPresent .and. isSet) then - call ESMF_LogWrite(subname//" Restart_n = "//trim(cvalue), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(subname//" Restart_n = "//trim(cvalue), ESMF_LOGMSG_INFO) read(cvalue,*) restart_n if(restart_n /= 0)then call NUOPC_CompAttributeGet(gcomp, name="restart_option", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) restart_option call ESMF_LogWrite(subname//" Restart_option = "//restart_option, & - ESMF_LOGMSG_INFO, rc=rc) + ESMF_LOGMSG_INFO) + else + call ESMF_LogSetError(ESMF_RC_VAL_WRONG, & + msg=subname//": ERROR both restart_n and restart_option must be set ", & + line=__LINE__, file=__FILE__, rcToReturn=rc) + return endif - + ! 
not used in nems call NUOPC_CompAttributeGet(gcomp, name="restart_ymd", value=cvalue, & isPresent=isPresent, isSet=isSet, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (isPresent .and. isSet) then read(cvalue,*) restart_ymd - call ESMF_LogWrite(subname//" Restart_ymd = "//trim(cvalue), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite(subname//" Restart_ymd = "//trim(cvalue), ESMF_LOGMSG_INFO) endif else - restart_option = 'none' - call ESMF_LogWrite(subname//" Set restart option = "//restart_option, ESMF_LOGMSG_INFO, rc=rc) + ! restart_n is zero, restarts will be written at finalize only (no alarm control) + restart_mode = 'no_alarms' + call ESMF_LogWrite(subname//" Restarts will be written at finalize only", ESMF_LOGMSG_INFO) endif endif endif - call AlarmInit(mclock, & - alarm = restart_alarm, & - option = trim(restart_option), & - opt_n = restart_n, & - opt_ymd = restart_ymd, & - RefTime = mcurrTime, & - alarmname = 'restart_alarm', rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - - call ESMF_AlarmSet(restart_alarm, clock=mclock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out - call ESMF_LogWrite(subname//" Restart alarm is Created and Set", ESMF_LOGMSG_INFO, rc=rc) + if (restart_mode == 'alarms') then + call AlarmInit(mclock, & + alarm = restart_alarm, & + option = trim(restart_option), & + opt_n = restart_n, & + opt_ymd = restart_ymd, & + RefTime = mcurrTime, & + alarmname = 'restart_alarm', rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + call ESMF_AlarmSet(restart_alarm, clock=mclock, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_LogWrite(subname//" Restart alarm is Created and Set", ESMF_LOGMSG_INFO) + end if ! 
create a 1-shot alarm at the driver stop time stop_alarm = ESMF_AlarmCreate(mclock, ringtime=dstopTime, name = "stop_alarm", rc=rc) - call ESMF_LogWrite(subname//" Create Stop alarm", ESMF_LOGMSG_INFO, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return + call ESMF_LogWrite(subname//" Create Stop alarm", ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(dstoptime, timestring=timestr, rc=rc) - call ESMF_LogWrite("Stop Alarm will ring at : "//trim(timestr), ESMF_LOGMSG_INFO, rc=rc) + call ESMF_LogWrite("Stop Alarm will ring at : "//trim(timestr), ESMF_LOGMSG_INFO) first_time = .false. @@ -2110,20 +1786,13 @@ subroutine ModelSetRunClock(gcomp, rc) !-------------------------------- call ESMF_ClockAdvance(mclock,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine ModelSetRunClock - !=============================================================================== !> Called by NUOPC at the end of the run to clean up. @@ -2145,54 +1814,34 @@ subroutine ocean_model_finalize(gcomp, rc) type(ESMF_Alarm), allocatable :: alarmList(:) integer :: alarmCount character(len=64) :: timestamp - character(len=64) :: alarm_name logical :: write_restart - integer :: i character(len=*),parameter :: subname='(MOM_cap:ocean_model_finalize)' write(*,*) 'MOM: --- finalize called ---' rc = ESMF_SUCCESS call ESMF_GridCompGetInternalState(gcomp, ocean_internalstate, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ocean_public => ocean_internalstate%ptr%ocean_public_type_ptr ocean_state => ocean_internalstate%ptr%ocean_state_type_ptr call NUOPC_ModelGet(gcomp, modelClock=clock, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_ClockGet(clock, currTime=currTime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return Time = esmf2fms_time(currTime) - ! Check if the clock has a restart alarm - and if it does do not write a restart - call ESMF_ClockGet(clock, alarmCount=alarmCount, rc = rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return - - allocate(alarmList(1:alarmCount)) - call ESMF_ClockGetAlarmList(clock, alarmlistflag=ESMF_ALARMLIST_ALL, alarmList=alarmList, rc = rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, file=__FILE__)) return - - write_restart = .true. - do i = 1,alarmCount - call ESMF_AlarmGet(alarmlist(i), name=alarm_name, rc = rc) - if(trim(alarm_name) == 'restart_alarm' .and. ESMF_AlarmIsEnabled(alarmlist(i), rc=rc))write_restart = .false. - enddo - deallocate(alarmList) + ! Do not write a restart unless mode is no_alarms + if (restart_mode == 'no_alarms') then + write_restart = .true. + else + write_restart = .false. 
+ end if + if (write_restart)call ESMF_LogWrite("No Restart Alarm, writing restart at Finalize ", & + ESMF_LOGMSG_INFO) - if(write_restart)call ESMF_LogWrite("No Restart Alarm, writing restart at Finalize ", ESMF_LOGMSG_INFO, rc=rc) call ocean_model_end(ocean_public, ocean_State, Time, write_restart=write_restart) call field_manager_end() @@ -2223,16 +1872,15 @@ subroutine State_SetScalar(value, scalar_id, State, mytask, scalar_name, scalar_ rc = ESMF_SUCCESS call ESMF_StateGet(State, itemName=trim(scalar_name), field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (mytask == 0) then call ESMF_FieldGet(field, farrayPtr=farrayptr, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (scalar_id < 0 .or. scalar_id > scalar_count) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & - msg=subname//": ERROR in scalar_id", & - line=__LINE__, file=__FILE__, rcToReturn=rc) + msg=subname//": ERROR in scalar_id", line=__LINE__, file=__FILE__, rcToReturn=rc) return endif @@ -2270,57 +1918,36 @@ subroutine MOM_RealizeFields(state, nfields, field_defs, tag, grid, mesh, rc) if (field_defs(i)%shortname == scalar_field_name) then call ESMF_LogWrite(subname // tag // " Field "// trim(field_defs(i)%stdname) // " is connected on root pe.", & - ESMF_LOGMSG_INFO, & - line=__LINE__, & - file=__FILE__, & - rc=rc) + ESMF_LOGMSG_INFO) call SetScalarField(field, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return else call ESMF_LogWrite(subname // tag // " Field "// trim(field_defs(i)%stdname) // " is connected.", & - ESMF_LOGMSG_INFO, & - line=__LINE__, & - file=__FILE__, & - rc=rc) + ESMF_LOGMSG_INFO) if (present(grid)) then field = ESMF_FieldCreate(grid, ESMF_TYPEKIND_R8, indexflag=ESMF_INDEX_DELOCAL, & name=field_defs(i)%shortname, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! initialize fldptr to zero call ESMF_FieldGet(field, farrayPtr=fldptr2d, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return fldptr2d(:,:) = 0.0 else if (present(mesh)) then field = ESMF_FieldCreate(mesh=mesh, typekind=ESMF_TYPEKIND_R8, meshloc=ESMF_MESHLOC_ELEMENT, & name=field_defs(i)%shortname, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! initialize fldptr to zero call ESMF_FieldGet(field, farrayPtr=fldptr1d, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return fldptr1d(:) = 0.0 endif @@ -2329,24 +1956,16 @@ subroutine MOM_RealizeFields(state, nfields, field_defs, tag, grid, mesh, rc) ! Realize connected field call NUOPC_Realize(state, field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return else ! 
field is not connected call ESMF_LogWrite(subname // tag // " Field "// trim(field_defs(i)%stdname) // " is not connected.", & - ESMF_LOGMSG_INFO, & - line=__LINE__, & - file=__FILE__, & - rc=rc) + ESMF_LOGMSG_INFO) + ! remove a not connected Field from State call ESMF_StateRemove(state, (/field_defs(i)%shortname/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif @@ -2369,24 +1988,15 @@ subroutine SetScalarField(field, rc) ! create a DistGrid with a single index space element, which gets mapped onto DE 0. distgrid = ESMF_DistGridCreate(minIndex=(/1/), maxIndex=(/1/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return grid = ESMF_GridCreate(distgrid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! num of scalar values field = ESMF_FieldCreate(name=trim(scalar_field_name), grid=grid, typekind=ESMF_TYPEKIND_R8, & ungriddedLBound=(/1/), ungriddedUBound=(/scalar_field_count/), gridToFieldMap=(/2/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine SetScalarField diff --git a/config_src/nuopc_driver/mom_cap_methods.F90 b/config_src/nuopc_driver/mom_cap_methods.F90 index 70915d0e95..78014f1c63 100644 --- a/config_src/nuopc_driver/mom_cap_methods.F90 +++ b/config_src/nuopc_driver/mom_cap_methods.F90 @@ -5,7 +5,7 @@ module MOM_cap_methods use ESMF, only: ESMF_TimeInterval, ESMF_TimeIntervalGet use ESMF, only: ESMF_State, ESMF_StateGet use ESMF, only: ESMF_Field, ESMF_FieldGet, ESMF_FieldCreate -use ESMF, only: ESMF_GridComp, ESMF_Mesh, ESMF_Grid, ESMF_GridCreate +use ESMF, only: ESMF_GridComp, ESMF_Mesh, ESMF_MeshGet, ESMF_Grid, ESMF_GridCreate use ESMF, only: ESMF_DistGrid, ESMF_DistGridCreate use ESMF, only: ESMF_KIND_R8, ESMF_SUCCESS, ESMF_LogFoundError use ESMF, only: ESMF_LOGERR_PASSTHRU, ESMF_LOGMSG_INFO, ESMF_LOGWRITE @@ -13,7 +13,8 @@ module MOM_cap_methods use ESMF, only: ESMF_StateItem_Flag, ESMF_STATEITEM_NOTFOUND use ESMF, only: ESMF_GEOMTYPE_FLAG, ESMF_GEOMTYPE_GRID, ESMF_GEOMTYPE_MESH use ESMF, only: ESMF_RC_VAL_OUTOFRANGE, ESMF_INDEX_DELOCAL, ESMF_MESHLOC_ELEMENT -use ESMF, only: ESMF_TYPEKIND_R8 +use ESMF, only: ESMF_TYPEKIND_R8, ESMF_FIELDSTATUS_COMPLETE +use ESMF, only: ESMF_FieldStatus_Flag, ESMF_LOGMSG_ERROR, ESMF_FAILURE, ESMF_MAXSTR use ESMF, only: operator(/=), operator(==) use MOM_ocean_model_nuopc, only: ocean_public_type, ocean_state_type use MOM_surface_forcing_nuopc, only: ice_ocean_boundary_type @@ -28,6 +29,8 @@ module MOM_cap_methods public :: mom_set_geomtype public :: mom_import public :: mom_export +public :: state_diagnose +public :: ChkErr private :: State_getImport private :: State_setExport @@ -43,6 +46,9 @@ module MOM_cap_methods type(ESMF_GeomType_Flag) :: geomtype !< SMF type describing type of !! 
geometry (mesh or grid) +character(len=*),parameter :: u_FILE_u = & + __FILE__ + contains !> Sets module variable geometry type @@ -70,6 +76,8 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, character(len=128) :: fldname real(ESMF_KIND_R8), allocatable :: taux(:,:) real(ESMF_KIND_R8), allocatable :: tauy(:,:) + real(ESMF_KIND_R8), allocatable :: stkx1(:,:),stkx2(:,:),stkx3(:,:) + real(ESMF_KIND_R8), allocatable :: stky1(:,:),stky2(:,:),stky3(:,:) character(len=*) , parameter :: subname = '(mom_import)' rc = ESMF_SUCCESS @@ -86,60 +94,42 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, !---- call state_getimport(importState, 'inst_pres_height_surface', & isc, iec, jsc, jec, ice_ocean_boundary%p, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! near-IR, direct shortwave (W/m2) !---- call state_getimport(importState, 'mean_net_sw_ir_dir_flx', & isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_nir_dir, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! near-IR, diffuse shortwave (W/m2) !---- call state_getimport(importState, 'mean_net_sw_ir_dif_flx', & isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_nir_dif, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! visible, direct shortwave (W/m2) !---- call state_getimport(importState, 'mean_net_sw_vis_dir_flx', & isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_vis_dir, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! 
visible, diffuse shortwave (W/m2) !---- call state_getimport(importState, 'mean_net_sw_vis_dif_flx', & isc, iec, jsc, jec, ice_ocean_boundary%sw_flux_vis_dif, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! ------- ! Net longwave radiation (W/m2) ! ------- call state_getimport(importState, 'mean_net_lw_flx', & isc, iec, jsc, jec, ice_ocean_boundary%lw_flux, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! zonal and meridional surface stress @@ -148,15 +138,9 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, allocate (tauy(isc:iec,jsc:jec)) call state_getimport(importState, 'mean_zonal_moment_flx', isc, iec, jsc, jec, taux, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call state_getimport(importState, 'mean_merid_moment_flx', isc, iec, jsc, jec, tauy, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! rotate taux and tauy from true zonal/meridional to local coordinates @@ -178,40 +162,28 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, !---- call state_getimport(importState, 'mean_sensi_heat_flx', & isc, iec, jsc, jec, ice_ocean_boundary%t_flux, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! 
evaporation flux (W/m2) !---- call state_getimport(importState, 'mean_evap_rate', & isc, iec, jsc, jec, ice_ocean_boundary%q_flux, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! liquid precipitation (rain) !---- call state_getimport(importState, 'mean_prec_rate', & isc, iec, jsc, jec, ice_ocean_boundary%lprec, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! frozen precipitation (snow) !---- call state_getimport(importState, 'mean_fprec_rate', & isc, iec, jsc, jec, ice_ocean_boundary%fprec, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! mass and heat content of liquid and frozen runoff @@ -223,37 +195,25 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, ice_ocean_boundary%lrunoff (:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'Foxx_rofl', & isc, iec, jsc, jec, ice_ocean_boundary%lrunoff,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! ice runoff ice_ocean_boundary%frunoff (:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'Foxx_rofi', & isc, iec, jsc, jec, ice_ocean_boundary%frunoff,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! 
heat content of lrunoff ice_ocean_boundary%lrunoff_hflx(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mean_runoff_heat_flx', & isc, iec, jsc, jec, ice_ocean_boundary%lrunoff_hflx, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! heat content of frunoff ice_ocean_boundary%frunoff_hflx(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mean_calving_heat_flx', & isc, iec, jsc, jec, ice_ocean_boundary%frunoff_hflx, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! salt flux from ice @@ -261,10 +221,7 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, ice_ocean_boundary%salt_flux(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mean_salt_rate', & isc, iec, jsc, jec, ice_ocean_boundary%salt_flux,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! !---- ! ! snow&ice melt heat flux (W/m^2) @@ -272,10 +229,7 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, ice_ocean_boundary%seaice_melt_heat(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'net_heat_flx_to_ocn', & isc, iec, jsc, jec, ice_ocean_boundary%seaice_melt_heat,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! !---- ! ! 
snow&ice melt water flux (W/m^2) @@ -283,10 +237,7 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, ice_ocean_boundary%seaice_melt(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mean_fresh_water_to_ocean_rate', & isc, iec, jsc, jec, ice_ocean_boundary%seaice_melt,rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return !---- ! mass of overlying ice @@ -297,10 +248,57 @@ subroutine mom_import(ocean_public, ocean_grid, importState, ice_ocean_boundary, ice_ocean_boundary%mi(:,:) = 0._ESMF_KIND_R8 call state_getimport(importState, 'mass_of_overlying_ice', & isc, iec, jsc, jec, ice_ocean_boundary%mi, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + + !---- + ! Partitioned Stokes Drift Components + !---- + if ( associated(ice_ocean_boundary%ustkb) ) then + allocate(stkx1(isc:iec,jsc:jec)) + allocate(stky1(isc:iec,jsc:jec)) + allocate(stkx2(isc:iec,jsc:jec)) + allocate(stky2(isc:iec,jsc:jec)) + allocate(stkx3(isc:iec,jsc:jec)) + allocate(stky3(isc:iec,jsc:jec)) + + call state_getimport(importState,'eastward_partitioned_stokes_drift_1' , isc, iec, jsc, jec, stkx1,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'northward_partitioned_stokes_drift_1', isc, iec, jsc, jec, stky1,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'eastward_partitioned_stokes_drift_2' , isc, iec, jsc, jec, stkx2,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'northward_partitioned_stokes_drift_2', isc, iec, jsc, jec, stky2,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'eastward_partitioned_stokes_drift_3' , isc, iec, jsc, jec, stkx3,rc=rc) + if 
(ChkErr(rc,__LINE__,u_FILE_u)) return + call state_getimport(importState,'northward_partitioned_stokes_drift_3', isc, iec, jsc, jec, stky3,rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! rotate from true zonal/meridional to local coordinates + do j = jsc, jec + jg = j + ocean_grid%jsc - jsc + do i = isc, iec + ig = i + ocean_grid%isc - isc + ice_ocean_boundary%ustkb(i,j,1) = ocean_grid%cos_rot(ig,jg)*stkx1(i,j) & + - ocean_grid%sin_rot(ig,jg)*stky1(i,j) + ice_ocean_boundary%vstkb(i,j,1) = ocean_grid%cos_rot(ig,jg)*stky1(i,j) & + + ocean_grid%sin_rot(ig,jg)*stkx1(i,j) + + ice_ocean_boundary%ustkb(i,j,2) = ocean_grid%cos_rot(ig,jg)*stkx2(i,j) & + - ocean_grid%sin_rot(ig,jg)*stky2(i,j) + ice_ocean_boundary%vstkb(i,j,2) = ocean_grid%cos_rot(ig,jg)*stky2(i,j) & + + ocean_grid%sin_rot(ig,jg)*stkx2(i,j) + + ice_ocean_boundary%ustkb(i,j,3) = ocean_grid%cos_rot(ig,jg)*stkx3(i,j) & + - ocean_grid%sin_rot(ig,jg)*stky3(i,j) + ice_ocean_boundary%vstkb(i,j,3) = ocean_grid%cos_rot(ig,jg)*stky3(i,j) & + + ocean_grid%sin_rot(ig,jg)*stkx3(i,j) + enddo + enddo + + deallocate(stkx1,stkx2,stkx3,stky1,stky2,stky3) + endif end subroutine mom_import @@ -339,16 +337,10 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, rc = ESMF_SUCCESS call ESMF_ClockGet( clock, timeStep=timeStep, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeIntervalGet( timeStep, s=dt_int, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Use Adcroft's rule of reciprocals; it does the right thing here. 
if (real(dt_int) > 0.0) then @@ -378,10 +370,7 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, call State_SetExport(exportState, 'ocean_mask', & isc, iec, jsc, jec, omask, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(omask) @@ -390,20 +379,14 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, ! ------- call State_SetExport(exportState, 'sea_surface_temperature', & isc, iec, jsc, jec, ocean_public%t_surf, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! ------- ! Sea surface salinity ! ------- call State_SetExport(exportState, 's_surf', & isc, iec, jsc, jec, ocean_public%s_surf, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! ------- ! zonal and meridional currents @@ -430,17 +413,11 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, call State_SetExport(exportState, 'ocn_current_zonal', & isc, iec, jsc, jec, ocz_rot, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call State_SetExport(exportState, 'ocn_current_merid', & isc, iec, jsc, jec, ocm_rot, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(ocz, ocm, ocz_rot, ocm_rot) @@ -451,10 +428,7 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, if (itemFlag /= ESMF_STATEITEM_NOTFOUND) then call State_SetExport(exportState, 'So_bldepth', & isc, iec, jsc, jec, ocean_public%obld, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif ! ------- @@ -478,10 +452,7 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, call State_SetExport(exportState, 'freezing_melting_potential', & isc, iec, jsc, jec, melt_potential, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(melt_potential) @@ -492,10 +463,7 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, if (itemFlag /= ESMF_STATEITEM_NOTFOUND) then call State_SetExport(exportState, 'sea_level', & isc, iec, jsc, jec, ocean_public%sea_lev, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return endif !---------------- @@ -598,17 +566,11 @@ subroutine mom_export(ocean_public, ocean_grid, ocean_state, exportState, clock, call State_SetExport(exportState, 'sea_surface_slope_zonal', & isc, iec, jsc, jec, dhdx_rot, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call State_SetExport(exportState, 'sea_surface_slope_merid', & isc, iec, jsc, jec, dhdy_rot, ocean_grid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return deallocate(ssh, dhdx, dhdy, dhdx_rot, dhdy_rot) @@ -627,15 +589,9 @@ subroutine State_GetFldPtr_1d(State, fldname, fldptr, rc) character(len=*),parameter :: subname='(MOM_cap:State_GetFldPtr)' call ESMF_StateGet(State, itemName=trim(fldname), field=lfield, rc=lrc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_FieldGet(lfield, farrayPtr=fldptr, rc=lrc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (present(rc)) rc = lrc @@ -654,15 +610,9 @@ subroutine State_GetFldPtr_2d(State, fldname, fldptr, rc) character(len=*),parameter :: subname='(MOM_cap:State_GetFldPtr)' call ESMF_StateGet(State, itemName=trim(fldname), field=lfield, rc=lrc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_FieldGet(lfield, farrayPtr=fldptr, rc=lrc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return if (present(rc)) rc = lrc @@ -702,10 +652,7 @@ subroutine State_GetImport(state, fldname, isc, iec, jsc, jec, output, do_sum, r ! get field pointer call state_getfldptr(state, trim(fldname), dataptr1d, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! 
determine output array n = 0 @@ -723,10 +670,7 @@ subroutine State_GetImport(state, fldname, isc, iec, jsc, jec, output, do_sum, r else if (geomtype == ESMF_GEOMTYPE_GRID) then call state_getfldptr(state, trim(fldname), dataptr2d, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return lbnd1 = lbound(dataPtr2d,1) lbnd2 = lbound(dataPtr2d,2) @@ -786,10 +730,7 @@ subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid if (geomtype == ESMF_GEOMTYPE_MESH) then call state_getfldptr(state, trim(fldname), dataptr1d, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return n = 0 do j = jsc, jec @@ -804,10 +745,7 @@ subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid else if (geomtype == ESMF_GEOMTYPE_GRID) then call state_getfldptr(state, trim(fldname), dataptr2d, rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return lbnd1 = lbound(dataPtr2d,1) lbnd2 = lbound(dataPtr2d,2) @@ -828,4 +766,190 @@ subroutine State_SetExport(state, fldname, isc, iec, jsc, jec, input, ocean_grid end subroutine State_SetExport +!> This subroutine writes the minimum, maximum and sum of each field +!! contained within an ESMF state. +subroutine state_diagnose(State, string, rc) + + ! ---------------------------------------------- + ! Diagnose status of State + ! ---------------------------------------------- + + type(ESMF_State), intent(in) :: state !< An ESMF State + character(len=*), intent(in) :: string !< A string indicating whether the State is an + !! import or export State + integer , intent(out) :: rc !< Return code + + ! 
local variables + integer :: i,j,n + type(ESMf_Field) :: lfield + integer :: fieldCount, lrank + character(ESMF_MAXSTR) ,pointer :: lfieldnamelist(:) + real(ESMF_KIND_R8), pointer :: dataPtr1d(:) + real(ESMF_KIND_R8), pointer :: dataPtr2d(:,:) + character(len=*),parameter :: subname='(state_diagnose)' + character(len=ESMF_MAXSTR) :: msgString + ! ---------------------------------------------- + + call ESMF_StateGet(state, itemCount=fieldCount, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + allocate(lfieldnamelist(fieldCount)) + + call ESMF_StateGet(state, itemNameList=lfieldnamelist, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + do n = 1, fieldCount + + call ESMF_StateGet(state, itemName=lfieldnamelist(n), field=lfield, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + call field_getfldptr(lfield, fldptr1=dataPtr1d, fldptr2=dataPtr2d, rank=lrank, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (lrank == 0) then + ! no local data + elseif (lrank == 1) then + if (size(dataPtr1d) > 0) then + write(msgString,'(A,3g14.7,i8)') trim(string)//': '//trim(lfieldnamelist(n)), & + minval(dataPtr1d), maxval(dataPtr1d), sum(dataPtr1d), size(dataPtr1d) + else + write(msgString,'(A,a)') trim(string)//': '//trim(lfieldnamelist(n))," no data" + endif + elseif (lrank == 2) then + if (size(dataPtr2d) > 0) then + write(msgString,'(A,3g14.7,i8)') trim(string)//': '//trim(lfieldnamelist(n)), & + minval(dataPtr2d), maxval(dataPtr2d), sum(dataPtr2d), size(dataPtr2d) + else + write(msgString,'(A,a)') trim(string)//': '//trim(lfieldnamelist(n))," no data" + endif + else + call ESMF_LogWrite(trim(subname)//": ERROR rank not supported ", ESMF_LOGMSG_ERROR) + rc = ESMF_FAILURE + return + endif + call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO) + enddo + + deallocate(lfieldnamelist) + +end subroutine state_diagnose + +!> Obtain a pointer to a rank 1 or rank 2 ESMF field +subroutine field_getfldptr(field, fldptr1, fldptr2, rank, abort, rc) + + ! 
input/output variables + type(ESMF_Field) , intent(in) :: field !< An ESMF field + real(ESMF_KIND_R8), pointer , intent(inout), optional :: fldptr1(:) !< A pointer to a rank 1 ESMF field + real(ESMF_KIND_R8), pointer , intent(inout), optional :: fldptr2(:,:) !< A pointer to a rank 2 ESMF field + integer , intent(out) , optional :: rank !< Field rank + logical , intent(in) , optional :: abort !< Abort code + integer , intent(out) , optional :: rc !< Return code + + ! local variables + type(ESMF_GeomType_Flag) :: geomtype + type(ESMF_FieldStatus_Flag) :: status + type(ESMF_Mesh) :: lmesh + integer :: lrank, nnodes, nelements + logical :: labort + character(len=*), parameter :: subname='(field_getfldptr)' + ! ---------------------------------------------- + + if (.not.present(rc)) then + call ESMF_LogWrite(trim(subname)//": ERROR rc not present ", & + ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) + rc = ESMF_FAILURE + return + endif + + rc = ESMF_SUCCESS + + labort = .true. + if (present(abort)) then + labort = abort + endif + lrank = -99 + + call ESMF_FieldGet(field, status=status, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (status /= ESMF_FIELDSTATUS_COMPLETE) then + lrank = 0 + if (labort) then + call ESMF_LogWrite(trim(subname)//": ERROR data not allocated ", ESMF_LOGMSG_INFO) + rc = ESMF_FAILURE + return + else + call ESMF_LogWrite(trim(subname)//": WARNING data not allocated ", ESMF_LOGMSG_INFO) + endif + else + + call ESMF_FieldGet(field, geomtype=geomtype, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + if (geomtype == ESMF_GEOMTYPE_GRID) then + call ESMF_FieldGet(field, rank=lrank, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + elseif (geomtype == ESMF_GEOMTYPE_MESH) then + call ESMF_FieldGet(field, rank=lrank, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_FieldGet(field, mesh=lmesh, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_MeshGet(lmesh, numOwnedNodes=nnodes, numOwnedElements=nelements, rc=rc) 
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (nnodes == 0 .and. nelements == 0) lrank = 0 + else + call ESMF_LogWrite(trim(subname)//": ERROR geomtype not supported ", & + ESMF_LOGMSG_INFO) + rc = ESMF_FAILURE + return + endif ! geomtype + + if (lrank == 0) then + call ESMF_LogWrite(trim(subname)//": no local nodes or elements ", & + ESMF_LOGMSG_INFO) + elseif (lrank == 1) then + if (.not.present(fldptr1)) then + call ESMF_LogWrite(trim(subname)//": ERROR missing rank=1 array ", & + ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) + rc = ESMF_FAILURE + return + endif + call ESMF_FieldGet(field, farrayPtr=fldptr1, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + elseif (lrank == 2) then + if (.not.present(fldptr2)) then + call ESMF_LogWrite(trim(subname)//": ERROR missing rank=2 array ", & + ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) + rc = ESMF_FAILURE + return + endif + call ESMF_FieldGet(field, farrayPtr=fldptr2, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call ESMF_LogWrite(trim(subname)//": ERROR in rank ", & + ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) + rc = ESMF_FAILURE + return + endif + + endif ! status + + if (present(rank)) then + rank = lrank + endif + +end subroutine field_getfldptr + +!> Returns true if ESMF_LogFoundError() determines that rc is an error code. Otherwise false. +logical function ChkErr(rc, line, file) + integer, intent(in) :: rc !< return code to check + integer, intent(in) :: line !< Integer source line number + character(len=*), intent(in) :: file !< User-provided source file name + integer :: lrc + ChkErr = .false. + lrc = rc + if (ESMF_LogFoundError(rcToCheck=lrc, msg=ESMF_LOGERR_PASSTHRU, line=line, file=file)) then + ChkErr = .true. 
+ endif +end function ChkErr + end module MOM_cap_methods diff --git a/config_src/nuopc_driver/mom_cap_time.F90 b/config_src/nuopc_driver/mom_cap_time.F90 index daf9889c43..7f210bda71 100644 --- a/config_src/nuopc_driver/mom_cap_time.F90 +++ b/config_src/nuopc_driver/mom_cap_time.F90 @@ -16,6 +16,7 @@ module MOM_cap_time use ESMF , only : ESMF_RC_ARG_BAD use ESMF , only : operator(<), operator(/=), operator(+), operator(-), operator(*) , operator(>=) use ESMF , only : operator(<=), operator(>), operator(==) +use MOM_cap_methods , only : ChkErr implicit none; private @@ -125,22 +126,13 @@ subroutine AlarmInit( clock, alarm, option, & endif call ESMF_ClockGet(clock, CurrTime=CurrTime, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(CurrTime, yy=cyy, mm=cmm, dd=cdd, s=csec, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeGet(CurrTime, yy=nyy, mm=nmm, dd=ndd, s=nsec, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! initial guess of next alarm, this will be updated below if (present(RefTime)) then @@ -151,25 +143,16 @@ subroutine AlarmInit( clock, alarm, option, & ! Determine calendar call ESMF_ClockGet(clock, calendar=cal, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return ! 
Determine inputs for call to create alarm selectcase (trim(option)) case (optNONE, optNever) call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeSet( NextAlarm, yy=9999, mm=12, dd=1, s=0, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .false. case (optDate) @@ -188,15 +171,9 @@ subroutine AlarmInit( clock, alarm, option, & return endif call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call TimeInit(NextAlarm, lymd, cal, tod=ltod, desc="optDate", rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .false. case (optIfdays0) @@ -208,104 +185,65 @@ subroutine AlarmInit( clock, alarm, option, & return endif call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=opt_n, s=0, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .true. 
case (optNSteps, optNStep) call ESMF_ClockGet(clock, TimeStep=AlarmInterval, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNSeconds, optNSecond) call ESMF_TimeIntervalSet(AlarmInterval, s=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNMinutes, optNMinute) call ESMF_TimeIntervalSet(AlarmInterval, s=60, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNHours, optNHour) call ESMF_TimeIntervalSet(AlarmInterval, s=3600, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNDays, optNDay) call ESMF_TimeIntervalSet(AlarmInterval, d=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optNMonths, optNMonth) call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. 
case (optMonthly) call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=1, s=0, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .true. case (optNYears, optNYear) call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return AlarmInterval = AlarmInterval * opt_n update_nextalarm = .true. case (optYearly) call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return call ESMF_TimeSet( NextAlarm, yy=cyy, mm=1, dd=1, s=0, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return update_nextalarm = .true. 
case default @@ -332,10 +270,7 @@ subroutine AlarmInit( clock, alarm, option, & endif alarm = ESMF_AlarmCreate( name=lalarmname, clock=clock, ringTime=NextAlarm, ringInterval=AlarmInterval, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine AlarmInit @@ -378,10 +313,7 @@ subroutine TimeInit( Time, ymd, cal, tod, desc, logunit, rc) call date2ymd (ymd,yr,mon,day) call ESMF_TimeSet( Time, yy=yr, mm=mon, dd=day, s=ltod, calendar=cal, rc=rc ) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return + if (ChkErr(rc,__LINE__,u_FILE_u)) return end subroutine TimeInit diff --git a/config_src/nuopc_driver/mom_ocean_model_nuopc.F90 b/config_src/nuopc_driver/mom_ocean_model_nuopc.F90 index 1ba3484ef9..493762f4bc 100644 --- a/config_src/nuopc_driver/mom_ocean_model_nuopc.F90 +++ b/config_src/nuopc_driver/mom_ocean_model_nuopc.F90 @@ -144,7 +144,7 @@ module MOM_ocean_model_nuopc integer :: nstep = 0 !< The number of calls to update_ocean. logical :: use_ice_shelf !< If true, the ice shelf model is enabled. - logical :: use_waves !< If true use wave coupling. + logical,public :: use_waves !< If true use wave coupling. logical :: icebergs_alter_ocean !< If true, the icebergs can change ocean the !! ocean dynamics and forcing fluxes. @@ -204,7 +204,7 @@ module MOM_ocean_model_nuopc type(marine_ice_CS), pointer :: & marine_ice_CSp => NULL() !< A pointer to the control structure for the !! marine ice effects module. 
- type(wave_parameters_cs), pointer :: & + type(wave_parameters_cs), pointer, public :: & Waves !< A structure containing pointers to the surface wave fields type(surface_forcing_CS), pointer :: & forcing_CSp => NULL() !< A pointer to the MOM forcing control structure @@ -388,6 +388,9 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i "If true, enables surface wave modules.", default=.false.) if (OS%use_waves) then call MOM_wave_interface_init(OS%Time, OS%grid, OS%GV, OS%US, param_file, OS%Waves, OS%diag) + call get_param(param_file,mdl,"SURFBAND_WAVENUMBERS",OS%Waves%WaveNum_Cen, & + "Central wavenumbers for surface Stokes drift bands.",units='rad/m', & + default=0.12566) else call MOM_wave_interface_init_lite(param_file) endif @@ -572,7 +575,7 @@ subroutine update_ocean_model(Ice_ocean_boundary, OS, Ocean_sfc, & call set_net_mass_forcing(OS%fluxes, OS%forces, OS%grid, OS%US) if (OS%use_waves) then - call Update_Surface_Waves(OS%grid, OS%GV, OS%US, OS%time, ocean_coupling_time_step, OS%waves) + call Update_Surface_Waves(OS%grid, OS%GV, OS%US, OS%time, ocean_coupling_time_step, OS%waves, OS%forces) endif if (OS%nstep==0) then @@ -733,7 +736,7 @@ subroutine ocean_model_end(Ocean_sfc, Ocean_state, Time, write_restart) type(time_type), intent(in) :: Time !< The model time, used for writing restarts. logical, intent(in) :: write_restart !< true => write restart file - call ocean_model_save_restart(Ocean_state, Time) + if(write_restart)call ocean_model_save_restart(Ocean_state, Time) call diag_mediator_end(Time, Ocean_state%diag, end_diag_manager=.true.) 
call MOM_end(Ocean_state%MOM_CSp) if (Ocean_state%use_ice_shelf) call ice_shelf_end(Ocean_state%Ice_shelf_CSp) diff --git a/config_src/nuopc_driver/mom_surface_forcing_nuopc.F90 b/config_src/nuopc_driver/mom_surface_forcing_nuopc.F90 index 9ecf8bb01a..689a9f0f4a 100644 --- a/config_src/nuopc_driver/mom_surface_forcing_nuopc.F90 +++ b/config_src/nuopc_driver/mom_surface_forcing_nuopc.F90 @@ -183,6 +183,16 @@ module MOM_surface_forcing_nuopc !! ice-shelves, expressed as a coefficient !! for divergence damping, as determined !! outside of the ocean model in [m3/s] + real, pointer, dimension(:,:) :: ustk0 => NULL() !< Surface Stokes drift, zonal [m/s] + real, pointer, dimension(:,:) :: vstk0 => NULL() !< Surface Stokes drift, meridional [m/s] + real, pointer, dimension(:) :: stk_wavenumbers => NULL() !< The central wave number of Stokes bands [rad/m] + real, pointer, dimension(:,:,:) :: ustkb => NULL() !< Stokes Drift spectrum, zonal [m/s] + !! Horizontal - u points + !! 3rd dimension - wavenumber + real, pointer, dimension(:,:,:) :: vstkb => NULL() !< Stokes Drift spectrum, meridional [m/s] + !! Horizontal - v points + !! 3rd dimension - wavenumber + integer :: num_stk_bands !< Number of Stokes drift bands passed through the coupler integer :: xtype !< The type of the exchange - REGRID, REDIST or DIRECT type(coupler_2d_bc_type) :: fluxes !< A structure that may contain an array of !! named fields used for passive tracer fluxes. @@ -428,7 +438,10 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, call MOM_error(FATAL, "liquid runoff is being added via data_override but "// & "there is no associated runoff in the IOB") return - end if + endif + if (associated(IOB%lrunoff)) then + if(CS%liquid_runoff_from_data)call data_override('OCN', 'runoff', IOB%lrunoff, Time) + endif ! 
obtain fluxes from IOB; note the staggering of indices i0 = is - isc_bnd ; j0 = js - jsc_bnd @@ -445,7 +458,6 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! liquid runoff flux if (associated(IOB%lrunoff)) then - if(CS%liquid_runoff_from_data)call data_override('OCN', 'runoff', IOB%lrunoff, Time) fluxes%lrunoff(i,j) = kg_m2_s_conversion * IOB%lrunoff(i-i0,j-j0) * G%mask2dT(i,j) endif @@ -624,7 +636,7 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) real :: mass_eff !< effective mass of sea ice for rigidity [R Z ~> kg m-2] integer :: wind_stagger !< AGRID, BGRID_NE, or CGRID_NE (integers from MOM_domains) - integer :: i, j, is, ie, js, je, Isq, Ieq, Jsq, Jeq, i0, j0 + integer :: i, j, is, ie, js, je, Isq, Ieq, Jsq, Jeq, i0, j0, istk integer :: isd, ied, jsd, jed, IsdB, IedB, JsdB, JedB, isr, ier, jsr, jer integer :: isc_bnd, iec_bnd, jsc_bnd, jec_bnd @@ -663,6 +675,7 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) if ( (associated(IOB%area_berg) .and. (.not. associated(forces%area_berg))) .or. & (associated(IOB%mass_berg) .and. (.not. associated(forces%mass_berg))) ) & call allocate_mech_forcing(G, forces, iceberg=.true.) + if (associated(IOB%ice_rigidity)) then rigidity_at_h(:,:) = 0.0 call safe_alloc_ptr(forces%rigidity_ice_u,IsdB,IedB,jsd,jed) @@ -673,6 +686,9 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) if (associated(forces%rigidity_ice_u)) forces%rigidity_ice_u(:,:) = 0.0 if (associated(forces%rigidity_ice_v)) forces%rigidity_ice_v(:,:) = 0.0 + if ( associated(IOB%ustkb) ) & + call allocate_mech_forcing(G, forces, waves=.true., num_stk_bands=IOB%num_stk_bands) + ! applied surface pressure from atmosphere and cryosphere if (CS%use_limited_P_SSH) then forces%p_surf_SSH => forces%p_surf @@ -830,6 +846,24 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) endif ! endif for wind related fields + ! 
wave to ocean coupling + if ( associated(IOB%ustkb) ) then + + forces%stk_wavenumbers(:) = IOB%stk_wavenumbers + do j=js,je; do i=is,ie + forces%ustk0(i,j) = IOB%ustk0(i-I0,j-J0) ! How to be careful here that the domains are right? + forces%vstk0(i,j) = IOB%vstk0(i-I0,j-J0) + enddo ; enddo + call pass_vector(forces%ustk0,forces%vstk0, G%domain ) + do istk = 1,IOB%num_stk_bands + do j=js,je; do i=is,ie + forces%ustkb(i,j,istk) = IOB%ustkb(i-I0,j-J0,istk) + forces%vstkb(i,j,istk) = IOB%vstkb(i-I0,j-J0,istk) + enddo; enddo + call pass_vector(forces%ustkb(:,:,istk),forces%vstkb(:,:,istk), G%domain ) + enddo + endif + ! sea ice related dynamic fields if (associated(IOB%ice_rigidity)) then call pass_var(rigidity_at_h, G%Domain, halo=1) diff --git a/config_src/nuopc_driver/time_utils.F90 b/config_src/nuopc_driver/time_utils.F90 index e995c1b697..81efcd2765 100644 --- a/config_src/nuopc_driver/time_utils.F90 +++ b/config_src/nuopc_driver/time_utils.F90 @@ -14,6 +14,7 @@ module time_utils_mod use ESMF, only: ESMF_Time, ESMF_TimeGet, ESMF_LogFoundError use ESMF, only: ESMF_LOGERR_PASSTHRU,ESMF_TimeInterval use ESMF, only: ESMF_TimeIntervalGet, ESMF_TimeSet, ESMF_SUCCESS +use MOM_cap_methods, only: ChkErr implicit none; private @@ -34,6 +35,9 @@ module time_utils_mod public fms2esmf_time public string_to_date +character(len=*),parameter :: u_FILE_u = & + __FILE__ + contains !> Sets fms2esmf_cal_c to the corresponding ESMF calendar type @@ -90,10 +94,7 @@ function esmf2fms_time_t(time) call ESMF_TimeGet(time, yy=yy, mm=mm, dd=dd, h=h, m=m, s=s, & calkindflag=calkind, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! 
bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return esmf2fms_time_t = set_date(yy, mm, dd, h, m, s) @@ -111,10 +112,7 @@ function esmf2fms_timestep(timestep) integer :: rc call ESMF_TimeIntervalGet(timestep, s=s, calkindflag=calkind, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return esmf2fms_timestep = set_time(s, 0) @@ -142,10 +140,7 @@ function fms2esmf_time(time, calkind) call ESMF_TimeSet(fms2esmf_time, yy=yy, mm=mm, d=d, h=h, m=m, s=s, & calkindflag=l_calkind, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & - line=__LINE__, & - file=__FILE__)) & - return ! bail out + if (ChkErr(rc,__LINE__,u_FILE_u)) return end function fms2esmf_time diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index ce0343f714..23aa866b90 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -23,7 +23,7 @@ module MOM use MOM_diag_mediator, only : diag_grid_storage, diag_grid_storage_init use MOM_diag_mediator, only : diag_save_grids, diag_restore_grids use MOM_diag_mediator, only : diag_copy_storage_to_diag, diag_copy_diag_to_storage -use MOM_domain_init, only : MOM_domains_init +use MOM_domains, only : MOM_domains_init use MOM_domains, only : sum_across_PEs, pass_var, pass_vector, clone_MOM_domain use MOM_domains, only : To_North, To_East, To_South, To_West use MOM_domains, only : To_All, Omit_corners, CGRID_NE, SCALAR_PAIR @@ -880,12 +880,12 @@ subroutine step_MOM(forces_in, fluxes_in, sfc_state, Time_start, time_int_in, CS enddo ; enddo ; endif if (CS%ensemble_ocean) then - ! update the time for the next analysis step if needed - call set_analysis_time(CS%Time,CS%odaCS) ! store ensemble vector in odaCS call set_prior_tracer(CS%Time, G, GV, CS%h, CS%tv, CS%odaCS) ! call DA interface call oda(CS%Time,CS%odaCS) + ! 
update the time for the next analysis step if needed + call set_analysis_time(CS%Time,CS%odaCS) endif if (showCallTree) call callTree_waypoint("calling extract_surface_state (step_MOM)") diff --git a/src/core/MOM_forcing_type.F90 b/src/core/MOM_forcing_type.F90 index afcfa11633..dd6b92da2d 100644 --- a/src/core/MOM_forcing_type.F90 +++ b/src/core/MOM_forcing_type.F90 @@ -249,6 +249,19 @@ module MOM_forcing_type !! ice needs to be accumulated, and the rigidity explicitly !! reset to zero at the driver level when appropriate. + real, pointer, dimension(:,:) :: & + ustk0 => NULL(), & !< Surface Stokes drift, zonal [m/s] + vstk0 => NULL() !< Surface Stokes drift, meridional [m/s] + real, pointer, dimension(:) :: & + stk_wavenumbers => NULL() !< The central wave number of Stokes bands [rad/m] + real, pointer, dimension(:,:,:) :: & + ustkb => NULL(), & !< Stokes Drift spectrum, zonal [m/s] + !! Horizontal - u points + !! 3rd dimension - wavenumber + vstkb => NULL() !< Stokes Drift spectrum, meridional [m/s] + !! Horizontal - v points + !! 3rd dimension - wavenumber + logical :: initialized = .false. !< This indicates whether the appropriate arrays have been initialized. end type mech_forcing @@ -2983,7 +2996,7 @@ end subroutine allocate_forcing_by_ref !> Conditionally allocate fields within the mechanical forcing type using !! control flags. 
subroutine allocate_mech_forcing_by_group(G, forces, stress, ustar, shelf, & - press, iceberg) + press, iceberg, waves, num_stk_bands) type(ocean_grid_type), intent(in) :: G !< Ocean grid structure type(mech_forcing), intent(inout) :: forces !< Forcing fields structure @@ -2992,6 +3005,8 @@ subroutine allocate_mech_forcing_by_group(G, forces, stress, ustar, shelf, & logical, optional, intent(in) :: shelf !< If present and true, allocate forces for ice-shelf logical, optional, intent(in) :: press !< If present and true, allocate p_surf and related fields logical, optional, intent(in) :: iceberg !< If present and true, allocate forces for icebergs + logical, optional, intent(in) :: waves !< If present and true, allocate wave fields + integer, optional, intent(in) :: num_stk_bands !< Number of Stokes bands to allocate ! Local variables integer :: isd, ied, jsd, jed, IsdB, IedB, JsdB, JedB @@ -3017,6 +3032,24 @@ subroutine allocate_mech_forcing_by_group(G, forces, stress, ustar, shelf, & !These fields should only on allocated when iceberg area is being passed through the coupler. 
call myAlloc(forces%area_berg,isd,ied,jsd,jed, iceberg) call myAlloc(forces%mass_berg,isd,ied,jsd,jed, iceberg) + + !These fields should only be allocated when waves + call myAlloc(forces%ustk0,isd,ied,jsd,jed, waves) + call myAlloc(forces%vstk0,isd,ied,jsd,jed, waves) + if (present(waves)) then; if (waves) then; if (.not.associated(forces%ustkb)) then + if (.not.(present(num_stk_bands))) call MOM_error(FATAL,"Requested to & + initialize with waves, but no waves are present.") + allocate(forces%stk_wavenumbers(num_stk_bands)) + forces%stk_wavenumbers(:) = 0.0 + allocate(forces%ustkb(isd:ied,jsd:jed,num_stk_bands)) + forces%ustkb(isd:ied,jsd:jed,:) = 0.0 + endif ; endif ; endif + + if (present(waves)) then; if (waves) then; if (.not.associated(forces%vstkb)) then + allocate(forces%vstkb(isd:ied,jsd:jed,num_stk_bands)) + forces%vstkb(isd:ied,jsd:jed,:) = 0.0 + endif ; endif ; endif + end subroutine allocate_mech_forcing_by_group diff --git a/src/core/MOM_grid.F90 b/src/core/MOM_grid.F90 index 9ca98adf71..60219c1c68 100644 --- a/src/core/MOM_grid.F90 +++ b/src/core/MOM_grid.F90 @@ -5,7 +5,7 @@ module MOM_grid use MOM_hor_index, only : hor_index_type, hor_index_init use MOM_domains, only : MOM_domain_type, get_domain_extent, compute_block_extent -use MOM_domains, only : get_global_shape, get_domain_extent_dsamp2, deallocate_MOM_domain +use MOM_domains, only : get_global_shape, deallocate_MOM_domain use MOM_error_handler, only : MOM_error, MOM_mesg, FATAL use MOM_file_parser, only : get_param, log_param, log_version, param_file_type use MOM_unit_scaling, only : unit_scale_type @@ -363,9 +363,9 @@ subroutine MOM_grid_init(G, param_file, US, HI, global_indexing, bathymetry_at_v if ( G%block(nblocks)%jed+G%block(nblocks)%jdg_offset > G%HI%jed + G%HI%jdg_offset ) & call MOM_error(FATAL, "MOM_grid_init: G%jed_bk > G%jed") - call get_domain_extent_dsamp2(G%Domain, G%HId2%isc, G%HId2%iec, G%HId2%jsc, G%HId2%jec,& - G%HId2%isd, G%HId2%ied, G%HId2%jsd, G%HId2%jed,& - G%HId2%isg, 
G%HId2%ieg, G%HId2%jsg, G%HId2%jeg) + call get_domain_extent(G%Domain, G%HId2%isc, G%HId2%iec, G%HId2%jsc, G%HId2%jec, & + G%HId2%isd, G%HId2%ied, G%HId2%jsd, G%HId2%jed, & + G%HId2%isg, G%HId2%ieg, G%HId2%jsg, G%HId2%jeg, coarsen=2) ! Set array sizes for fields that are discretized at tracer cell boundaries. G%HId2%IscB = G%HId2%isc ; G%HId2%JscB = G%HId2%jsc diff --git a/src/core/MOM_open_boundary.F90 b/src/core/MOM_open_boundary.F90 index c06cbfeb11..9672356bf6 100644 --- a/src/core/MOM_open_boundary.F90 +++ b/src/core/MOM_open_boundary.F90 @@ -24,7 +24,7 @@ module MOM_open_boundary use MOM_tidal_forcing, only : astro_longitudes, astro_longitudes_init, eq_phase, nodal_fu, tidal_frequency use MOM_time_manager, only : set_date, time_type, time_type_to_real, operator(-) use MOM_tracer_registry, only : tracer_type, tracer_registry_type, tracer_name_lookup -use MOM_interpolate, only : init_external_field, time_interp_extern, time_interp_external_init +use MOM_interpolate, only : init_external_field, time_interp_external, time_interp_external_init use MOM_remapping, only : remappingSchemesDoc, remappingDefaultScheme, remapping_CS use MOM_remapping, only : initialize_remapping, remapping_core_h, end_remapping use MOM_regridding, only : regridding_CS @@ -859,8 +859,8 @@ subroutine initialize_segment_data(G, OBC, PF) endif endif segment%field(m)%buffer_src(:,:,:)=0.0 - segment%field(m)%fid = init_external_field(trim(filename),& - trim(fieldname),ignore_axis_atts=.true.,threading=SINGLE_FILE) + segment%field(m)%fid = init_external_field(trim(filename), trim(fieldname), & + ignore_axis_atts=.true., threading=SINGLE_FILE) if (siz(3) > 1) then if ((index(segment%field(m)%name, 'phase') > 0) .or. (index(segment%field(m)%name, 'amp') > 0)) then ! 
siz(3) is constituent for tidal variables @@ -890,8 +890,8 @@ subroutine initialize_segment_data(G, OBC, PF) endif segment%field(m)%dz_src(:,:,:)=0.0 segment%field(m)%nk_src=siz(3) - segment%field(m)%fid_dz = init_external_field(trim(filename),trim(fieldname),& - ignore_axis_atts=.true.,threading=SINGLE_FILE) + segment%field(m)%fid_dz = init_external_field(trim(filename), trim(fieldname), & + ignore_axis_atts=.true., threading=SINGLE_FILE) endif else segment%field(m)%nk_src=1 @@ -3908,7 +3908,7 @@ subroutine update_OBC_segment_data(G, GV, US, OBC, tv, h, Time) tmp_buffer_in => tmp_buffer endif - call time_interp_extern(segment%field(m)%fid,Time, tmp_buffer_in) + call time_interp_external(segment%field(m)%fid,Time, tmp_buffer_in) ! NOTE: Rotation of face-points require that we skip the final value if (turns /= 0) then ! TODO: This is hardcoded for 90 degrees, and needs to be generalized. @@ -3975,7 +3975,7 @@ subroutine update_OBC_segment_data(G, GV, US, OBC, tv, h, Time) ! no dz for tidal variables if (segment%field(m)%nk_src > 1 .and.& (index(segment%field(m)%name, 'phase') .le. 0 .and. index(segment%field(m)%name, 'amp') .le. 0)) then - call time_interp_extern(segment%field(m)%fid_dz,Time, tmp_buffer_in) + call time_interp_external(segment%field(m)%fid_dz,Time, tmp_buffer_in) if (turns /= 0) then ! TODO: This is hardcoded for 90 degrees, and needs to be generalized. if (segment%is_E_or_W & diff --git a/src/framework/MOM_coms.F90 b/src/framework/MOM_coms.F90 index 13fc4df75d..c3ed3ba7b3 100644 --- a/src/framework/MOM_coms.F90 +++ b/src/framework/MOM_coms.F90 @@ -4,16 +4,16 @@ module MOM_coms ! This file is part of MOM6. See LICENSE.md for the license. 
+use MOM_coms_infra, only : PE_here, root_PE, num_PEs, set_rootPE, Set_PElist, Get_PElist +use MOM_coms_infra, only : broadcast, field_chksum, MOM_infra_init, MOM_infra_end +use MOM_coms_infra, only : sum_across_PEs, max_across_PEs, min_across_PEs use MOM_error_handler, only : MOM_error, MOM_mesg, FATAL, WARNING -use MOM_coms_wrapper, only : PE_here, root_PE, num_PEs, Set_PElist, Get_PElist -use MOM_coms_wrapper, only : broadcast, field_chksum, MOM_infra_init, MOM_infra_end -use MOM_coms_wrapper, only : sum_across_PEs, max_across_PEs, min_across_PEs implicit none ; private public :: PE_here, root_PE, num_PEs, MOM_infra_init, MOM_infra_end public :: broadcast, sum_across_PEs, min_across_PEs, max_across_PEs, field_chksum -public :: Set_PElist, Get_PElist +public :: set_PElist, Get_PElist, Set_rootPE public :: reproducing_sum, reproducing_sum_EFP, EFP_sum_across_PEs, EFP_list_sum_across_PEs public :: EFP_plus, EFP_minus, EFP_to_real, real_to_EFP, EFP_real_diff public :: operator(+), operator(-), assignment(=) diff --git a/src/framework/MOM_coms_wrapper.F90 b/src/framework/MOM_coms_infra.F90 similarity index 97% rename from src/framework/MOM_coms_wrapper.F90 rename to src/framework/MOM_coms_infra.F90 index 954f6da93c..f187d010a4 100644 --- a/src/framework/MOM_coms_wrapper.F90 +++ b/src/framework/MOM_coms_infra.F90 @@ -1,11 +1,12 @@ !> Thin interfaces to non-domain-oriented mpp communication subroutines -module MOM_coms_wrapper +module MOM_coms_infra ! This file is part of MOM6. See LICENSE.md for the license. 
use fms_mod, only : fms_end, MOM_infra_init => fms_init use memutils_mod, only : print_memuse_stats use mpp_mod, only : PE_here => mpp_pe, root_PE => mpp_root_pe, num_PEs => mpp_npes +use mpp_mod, only : set_rootPE => mpp_set_root_pe use mpp_mod, only : Set_PElist => mpp_set_current_pelist, Get_PElist => mpp_get_current_pelist use mpp_mod, only : mpp_broadcast, mpp_sync, mpp_sync_self, field_chksum => mpp_chksum use mpp_mod, only : sum_across_PEs => mpp_sum, max_across_PEs => mpp_max, min_across_PEs => mpp_min @@ -13,7 +14,7 @@ module MOM_coms_wrapper implicit none ; private public :: PE_here, root_PE, num_PEs, MOM_infra_init, MOM_infra_end, Set_PElist, Get_PElist -public :: broadcast, sum_across_PEs, min_across_PEs, max_across_PEs, field_chksum +public :: set_rootPE, broadcast, sum_across_PEs, min_across_PEs, max_across_PEs, field_chksum ! This module provides interfaces to the non-domain-oriented communication subroutines. @@ -157,4 +158,4 @@ subroutine MOM_infra_end call fms_end() end subroutine MOM_infra_end -end module MOM_coms_wrapper +end module MOM_coms_infra diff --git a/src/framework/MOM_diag_manager.F90 b/src/framework/MOM_diag_manager.F90 index 0c9f875bcd..6519ffadb6 100644 --- a/src/framework/MOM_diag_manager.F90 +++ b/src/framework/MOM_diag_manager.F90 @@ -3,21 +3,23 @@ module MOM_diag_manager ! This file is part of MOM6. See LICENSE.md for the license. 
-use MOM_time_manager, only : time_type -use diag_axis_mod, only : diag_axis_init, get_diag_axis_name, EAST, NORTH -use diag_data_mod, only : null_axis_id +use diag_axis_mod, only : axis_init=>diag_axis_init, get_diag_axis_name, EAST, NORTH +use diag_data_mod, only : null_axis_id use diag_manager_mod, only : diag_manager_init, diag_manager_end use diag_manager_mod, only : send_data, diag_field_add_attribute, DIAG_FIELD_NOT_FOUND use diag_manager_mod, only : register_diag_field use diag_manager_mod, only : register_static_field_fms=>register_static_field use diag_manager_mod, only : get_diag_field_id_fms=>get_diag_field_id +use MOM_domain_infra, only : MOM_domain_type, domain2d +use MOM_error_infra, only : MOM_error=>MOM_err, FATAL +use MOM_time_manager, only : time_type implicit none ; private -public diag_manager_init, diag_manager_end -public diag_axis_init, get_diag_axis_name, EAST, NORTH, null_axis_id -public send_data, diag_field_add_attribute, DIAG_FIELD_NOT_FOUND -public register_diag_field_fms, register_static_field_fms, get_diag_field_id_fms +public :: diag_manager_init, diag_manager_end +public :: diag_axis_init, get_diag_axis_name, EAST, NORTH +public :: send_data, diag_field_add_attribute, DIAG_FIELD_NOT_FOUND +public :: register_diag_field_fms, register_static_field_fms, get_diag_field_id_fms !> A wrapper for register_diag_field_array() interface register_diag_field_fms @@ -94,6 +96,64 @@ integer function register_diag_field_scalar_fms(module_name, field_name, init_ti end function register_diag_field_scalar_fms +!> diag_axis_init stores up the information for an axis that can be used for diagnostics and +!! returns an integer hadle for this axis. 
+integer function diag_axis_init(name, data, units, cart_name, long_name, MOM_domain, position, & + direction, edges, set_name, coarsen, null_axis) + character(len=*), intent(in) :: name !< The name of this axis + real, dimension(:), intent(in) :: data !< The array of coordinate values + character(len=*), intent(in) :: units !< The units for the axis data + character(len=*), intent(in) :: cart_name !< Cartesian axis ("X", "Y", "Z", "T", or "N" for none) + character(len=*), & + optional, intent(in) :: long_name !< The long name of this axis + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + integer, optional, intent(in) :: position !< This indicates the relative position of this + !! axis. The default is CENTER, but EAST and NORTH + !! are common options. + integer, optional, intent(in) :: direction !< This indicates the direction along which this + !! axis increases: 1 for upward, -1 for downward, or + !! 0 for non-vertical axes (the default) + integer, optional, intent(in) :: edges !< The axis_id of the complementary axis that + !! describes the edges of this axis + character(len=*), & + optional, intent(in) :: set_name !< A name to use for this set of axes. + integer, optional, intent(in) :: coarsen !< An optional degree of coarsening for the grid, 1 + !! by default. + logical, optional, intent(in) :: null_axis !< If present and true, return the special null axis + !! id for use with scalars. + + integer :: coarsening ! The degree of grid coarsening + + if (present(null_axis)) then ; if (null_axis) then + ! 
Return the special null axis id for scalars + diag_axis_init = null_axis_id + return + endif ; endif + + if (present(MOM_domain)) then + coarsening = 1 ; if (present(coarsen)) coarsening = coarsen + if (coarsening == 1) then + diag_axis_init = axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges, & + domain2=MOM_domain%mpp_domain, domain_position=position) + elseif (coarsening == 2) then + diag_axis_init = axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges, & + domain2=MOM_domain%mpp_domain_d2, domain_position=position) + else + call MOM_error(FATAL, "diag_axis_init called with an invalid value of coarsen.") + endif + else + if (present(coarsen)) then ; if (coarsen /= 1) then + call MOM_error(FATAL, "diag_axis_init does not support grid coarsening without a MOM_domain.") + endif ; endif + diag_axis_init = axis_init(name, data, units, cart_name, long_name=long_name, & + direction=direction, set_name=set_name, edges=edges) + endif + +end function diag_axis_init + !> \namespace mom_diag_manager !! !! This module simply wraps register_diag_field() from FMS's diag_manager_mod. 
diff --git a/src/framework/MOM_diag_mediator.F90 b/src/framework/MOM_diag_mediator.F90 index 071585a951..108bd389e6 100644 --- a/src/framework/MOM_diag_mediator.F90 +++ b/src/framework/MOM_diag_mediator.F90 @@ -10,7 +10,7 @@ module MOM_diag_mediator use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end use MOM_cpu_clock, only : CLOCK_MODULE, CLOCK_ROUTINE use MOM_diag_manager, only : diag_manager_init, diag_manager_end -use MOM_diag_manager, only : diag_axis_init, get_diag_axis_name, null_axis_id +use MOM_diag_manager, only : diag_axis_init, get_diag_axis_name use MOM_diag_manager, only : send_data, diag_field_add_attribute, EAST, NORTH use MOM_diag_manager, only : register_diag_field_fms, register_static_field_fms use MOM_diag_manager, only : get_diag_field_id_fms, DIAG_FIELD_NOT_FOUND @@ -345,7 +345,7 @@ subroutine set_axes_info(G, GV, US, param_file, diag_cs, set_vertical) logical, optional, intent(in) :: set_vertical !< If true or missing, set up !! vertical axes ! Local variables - integer :: id_xq, id_yq, id_zl, id_zi, id_xh, id_yh + integer :: id_xq, id_yq, id_zl, id_zi, id_xh, id_yh, id_null integer :: id_zl_native, id_zi_native integer :: i, j, k, nz real :: zlev(GV%ke), zinter(GV%ke+1) @@ -380,39 +380,39 @@ subroutine set_axes_info(G, GV, US, param_file, diag_cs, set_vertical) if (G%symmetric) then if (diag_cs%grid_space_axes) then id_xq = diag_axis_init('iq', IaxB(G%isgB:G%iegB), 'none', 'x', & - 'q point grid-space longitude', Domain2=G%Domain%mpp_domain, domain_position=EAST) + 'q point grid-space longitude', G%Domain, position=EAST) id_yq = diag_axis_init('jq', JaxB(G%jsgB:G%jegB), 'none', 'y', & - 'q point grid space latitude', Domain2=G%Domain%mpp_domain, domain_position=NORTH) + 'q point grid space latitude', G%Domain, position=NORTH) else id_xq = diag_axis_init('xq', G%gridLonB(G%isgB:G%iegB), G%x_axis_units, 'x', & - 'q point nominal longitude', Domain2=G%Domain%mpp_domain, domain_position=EAST) + 'q point nominal longitude', 
G%Domain, position=EAST) id_yq = diag_axis_init('yq', G%gridLatB(G%jsgB:G%jegB), G%y_axis_units, 'y', & - 'q point nominal latitude', Domain2=G%Domain%mpp_domain, domain_position=NORTH) + 'q point nominal latitude', G%Domain, position=NORTH) endif else if (diag_cs%grid_space_axes) then id_xq = diag_axis_init('Iq', IaxB(G%isg:G%ieg), 'none', 'x', & - 'q point grid-space longitude', Domain2=G%Domain%mpp_domain, domain_position=EAST) + 'q point grid-space longitude', G%Domain, position=EAST) id_yq = diag_axis_init('Jq', JaxB(G%jsg:G%jeg), 'none', 'y', & - 'q point grid space latitude', Domain2=G%Domain%mpp_domain, domain_position=NORTH) + 'q point grid space latitude', G%Domain, position=NORTH) else id_xq = diag_axis_init('xq', G%gridLonB(G%isg:G%ieg), G%x_axis_units, 'x', & - 'q point nominal longitude', Domain2=G%Domain%mpp_domain, domain_position=EAST) + 'q point nominal longitude', G%Domain, position=EAST) id_yq = diag_axis_init('yq', G%gridLatB(G%jsg:G%jeg), G%y_axis_units, 'y', & - 'q point nominal latitude', Domain2=G%Domain%mpp_domain, domain_position=NORTH) + 'q point nominal latitude', G%Domain, position=NORTH) endif endif if (diag_cs%grid_space_axes) then id_xh = diag_axis_init('ih', iax(G%isg:G%ieg), 'none', 'x', & - 'h point grid-space longitude', Domain2=G%Domain%mpp_domain, domain_position=EAST) + 'h point grid-space longitude', G%Domain, position=EAST) id_yh = diag_axis_init('jh', jax(G%jsg:G%jeg), 'none', 'y', & - 'h point grid space latitude', Domain2=G%Domain%mpp_domain, domain_position=NORTH) + 'h point grid space latitude', G%Domain, position=NORTH) else id_xh = diag_axis_init('xh', G%gridLonT(G%isg:G%ieg), G%x_axis_units, 'x', & - 'h point nominal longitude', Domain2=G%Domain%mpp_domain) + 'h point nominal longitude', G%Domain) id_yh = diag_axis_init('yh', G%gridLatT(G%jsg:G%jeg), G%y_axis_units, 'y', & - 'h point nominal latitude', Domain2=G%Domain%mpp_domain) + 'h point nominal latitude', G%Domain) endif if (set_vert) then @@ -420,11 +420,9 @@ 
subroutine set_axes_info(G, GV, US, param_file, diag_cs, set_vertical) zinter(1:nz+1) = GV%sInterface(1:nz+1) zlev(1:nz) = GV%sLayer(1:nz) id_zl = diag_axis_init('zl', zlev, trim(GV%zAxisUnits), 'z', & - 'Layer '//trim(GV%zAxisLongName), & - direction=GV%direction) + 'Layer '//trim(GV%zAxisLongName), direction=GV%direction) id_zi = diag_axis_init('zi', zinter, trim(GV%zAxisUnits), 'z', & - 'Interface '//trim(GV%zAxisLongName), & - direction=GV%direction) + 'Interface '//trim(GV%zAxisLongName), direction=GV%direction) else id_zl = -1 ; id_zi = -1 endif @@ -473,8 +471,9 @@ subroutine set_axes_info(G, GV, US, param_file, diag_cs, set_vertical) call define_axes_group(diag_cs, (/ id_xh, id_yq /), diag_cs%axesCv1, & x_cell_method='mean', y_cell_method='point', is_v_point=.true.) - ! Axis group for special null axis from diag manager. (Could null_axis_id be made MOM specific?) - call define_axes_group(diag_cs, (/ null_axis_id /), diag_cs%axesNull) + ! Axis group for special null axis from diag manager. + id_null = diag_axis_init('scalar_axis', (/0./), 'none', 'N', 'none', null_axis=.true.) 
+ call define_axes_group(diag_cs, (/ id_null /), diag_cs%axesNull) !Non-native Non-downsampled if (diag_cs%num_diag_coords>0) then @@ -602,9 +601,9 @@ subroutine set_axes_info_dsamp(G, GV, param_file, diag_cs, id_zl_native, id_zi_n do i=diag_cs%dsamp(dl)%isgB,diag_cs%dsamp(dl)%iegB; gridLonB_dsamp(i) = G%gridLonB(G%isgB+dl*i); enddo do j=diag_cs%dsamp(dl)%jsgB,diag_cs%dsamp(dl)%jegB; gridLatB_dsamp(j) = G%gridLatB(G%jsgB+dl*j); enddo id_xq = diag_axis_init('xq', gridLonB_dsamp, G%x_axis_units, 'x', & - 'q point nominal longitude', Domain2=G%Domain%mpp_domain_d2) + 'q point nominal longitude', G%Domain, coarsen=2) id_yq = diag_axis_init('yq', gridLatB_dsamp, G%y_axis_units, 'y', & - 'q point nominal latitude', Domain2=G%Domain%mpp_domain_d2) + 'q point nominal latitude', G%Domain, coarsen=2) deallocate(gridLonB_dsamp,gridLatB_dsamp) else allocate(gridLonB_dsamp(diag_cs%dsamp(dl)%isg:diag_cs%dsamp(dl)%ieg)) @@ -612,9 +611,9 @@ subroutine set_axes_info_dsamp(G, GV, param_file, diag_cs, id_zl_native, id_zi_n do i=diag_cs%dsamp(dl)%isg,diag_cs%dsamp(dl)%ieg; gridLonB_dsamp(i) = G%gridLonB(G%isg+dl*i-2); enddo do j=diag_cs%dsamp(dl)%jsg,diag_cs%dsamp(dl)%jeg; gridLatB_dsamp(j) = G%gridLatB(G%jsg+dl*j-2); enddo id_xq = diag_axis_init('xq', gridLonB_dsamp, G%x_axis_units, 'x', & - 'q point nominal longitude', Domain2=G%Domain%mpp_domain_d2) + 'q point nominal longitude', G%Domain, coarsen=2) id_yq = diag_axis_init('yq', gridLatB_dsamp, G%y_axis_units, 'y', & - 'q point nominal latitude', Domain2=G%Domain%mpp_domain_d2) + 'q point nominal latitude', G%Domain, coarsen=2) deallocate(gridLonB_dsamp,gridLatB_dsamp) endif @@ -623,9 +622,9 @@ subroutine set_axes_info_dsamp(G, GV, param_file, diag_cs, id_zl_native, id_zi_n do i=diag_cs%dsamp(dl)%isg,diag_cs%dsamp(dl)%ieg; gridLonT_dsamp(i) = G%gridLonT(G%isg+dl*i-2); enddo do j=diag_cs%dsamp(dl)%jsg,diag_cs%dsamp(dl)%jeg; gridLatT_dsamp(j) = G%gridLatT(G%jsg+dl*j-2); enddo id_xh = diag_axis_init('xh', gridLonT_dsamp, 
G%x_axis_units, 'x', & - 'h point nominal longitude', Domain2=G%Domain%mpp_domain_d2) + 'h point nominal longitude', G%Domain, coarsen=2) id_yh = diag_axis_init('yh', gridLatT_dsamp, G%y_axis_units, 'y', & - 'h point nominal latitude', Domain2=G%Domain%mpp_domain_d2) + 'h point nominal latitude', G%Domain, coarsen=2) deallocate(gridLonT_dsamp,gridLatT_dsamp) diff --git a/src/framework/MOM_domain_infra.F90 b/src/framework/MOM_domain_infra.F90 new file mode 100644 index 0000000000..482e01871f --- /dev/null +++ b/src/framework/MOM_domain_infra.F90 @@ -0,0 +1,1817 @@ +!> Describes the decomposed MOM domain and has routines for communications across PEs +module MOM_domain_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use MOM_coms_infra, only : PE_here, root_PE, num_PEs +use MOM_cpu_clock, only : cpu_clock_begin, cpu_clock_end +use MOM_error_infra, only : MOM_error=>MOM_err, NOTE, WARNING, FATAL + +use mpp_domains_mod, only : MOM_define_layout => mpp_define_layout, mpp_get_boundary +use mpp_domains_mod, only : MOM_define_io_domain => mpp_define_io_domain +use mpp_domains_mod, only : MOM_define_domain => mpp_define_domains +use mpp_domains_mod, only : domain2D, domain1D, mpp_get_data_domain, mpp_get_domain_components +use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_global_domain +use mpp_domains_mod, only : mpp_get_domain_extents, mpp_deallocate_domain +use mpp_domains_mod, only : mpp_update_domains, global_field_sum => mpp_global_sum +use mpp_domains_mod, only : mpp_start_update_domains, mpp_complete_update_domains +use mpp_domains_mod, only : mpp_create_group_update, mpp_do_group_update +use mpp_domains_mod, only : group_pass_type => mpp_group_update_type +use mpp_domains_mod, only : mpp_reset_group_update_field, mpp_group_update_initialized +use mpp_domains_mod, only : mpp_start_group_update, mpp_complete_group_update +use mpp_domains_mod, only : compute_block_extent => mpp_compute_block_extent +use mpp_domains_mod, only : 
mpp_redistribute +use mpp_domains_mod, only : global_field => mpp_global_field +use mpp_domains_mod, only : broadcast_domain => mpp_broadcast_domain +use mpp_domains_mod, only : AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR, BITWISE_EXACT_SUM +use mpp_domains_mod, only : CYCLIC_GLOBAL_DOMAIN, FOLD_NORTH_EDGE +use mpp_domains_mod, only : To_East => WUPDATE, To_West => EUPDATE, Omit_Corners => EDGEUPDATE +use mpp_domains_mod, only : To_North => SUPDATE, To_South => NUPDATE +use mpp_domains_mod, only : CENTER, CORNER, NORTH_FACE => NORTH, EAST_FACE => EAST +use fms_io_mod, only : file_exist, parse_mask_table +use fms_affinity_mod, only : fms_affinity_init, fms_affinity_set, fms_affinity_get + +implicit none ; private + +public :: MOM_define_domain, MOM_define_layout +public :: create_MOM_domain, clone_MOM_domain, get_domain_components +public :: deallocate_MOM_domain +public :: get_domain_extent +public :: pass_var, pass_vector, fill_symmetric_edges, global_field_sum +public :: pass_var_start, pass_var_complete +public :: pass_vector_start, pass_vector_complete +public :: AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR, BITWISE_EXACT_SUM +public :: CORNER, CENTER, NORTH_FACE, EAST_FACE +public :: To_East, To_West, To_North, To_South, To_All, Omit_Corners +public :: create_group_pass, do_group_pass, group_pass_type +public :: start_group_pass, complete_group_pass +public :: compute_block_extent, get_global_shape +public :: global_field, redistribute_array, broadcast_domain +public :: MOM_thread_affinity_set, set_MOM_thread_affinity +public :: get_simple_array_i_ind, get_simple_array_j_ind +public :: domain2D, domain1D + +!> Do a halo update on an array +interface pass_var + module procedure pass_var_3d, pass_var_2d +end interface pass_var + +!> Do a halo update on a pair of arrays representing the two components of a vector +interface pass_vector + module procedure pass_vector_3d, pass_vector_2d +end interface pass_vector + +!> Initiate a non-blocking halo update on an array 
+interface pass_var_start + module procedure pass_var_start_3d, pass_var_start_2d +end interface pass_var_start + +!> Complete a non-blocking halo update on an array +interface pass_var_complete + module procedure pass_var_complete_3d, pass_var_complete_2d +end interface pass_var_complete + +!> Initiate a halo update on a pair of arrays representing the two components of a vector +interface pass_vector_start + module procedure pass_vector_start_3d, pass_vector_start_2d +end interface pass_vector_start + +!> Complete a halo update on a pair of arrays representing the two components of a vector +interface pass_vector_complete + module procedure pass_vector_complete_3d, pass_vector_complete_2d +end interface pass_vector_complete + +!> Set up a group of halo updates +interface create_group_pass + module procedure create_var_group_pass_2d + module procedure create_var_group_pass_3d + module procedure create_vector_group_pass_2d + module procedure create_vector_group_pass_3d +end interface create_group_pass + +!> Do a set of halo updates that fill in the values at the duplicated edges +!! of a staggered symmetric memory domain +interface fill_symmetric_edges + module procedure fill_vector_symmetric_edges_2d !, fill_vector_symmetric_edges_3d +! module procedure fill_scalar_symmetric_edges_2d, fill_scalar_symmetric_edges_3d +end interface fill_symmetric_edges + +!> Pass an array from one MOM domain to another +interface redistribute_array + module procedure redistribute_array_3d, redistribute_array_2d +end interface redistribute_array + +!> Copy one MOM_domain_type into another +interface clone_MOM_domain + module procedure clone_MD_to_MD, clone_MD_to_d2D +end interface clone_MOM_domain + +!> Extract the 1-d domain components from a MOM_domain or domain2d +interface get_domain_components + module procedure get_domain_components_MD, get_domain_components_d2D +end interface get_domain_components + +!> The MOM_domain_type contains information about the domain decomposition. 
+type, public :: MOM_domain_type + character(len=64) :: name !< The name of this domain + type(domain2D), pointer :: mpp_domain => NULL() !< The FMS domain with halos + !! on this processor, centered at h points. + type(domain2D), pointer :: mpp_domain_d2 => NULL() !< A coarse FMS domain with halos + !! on this processor, centered at h points. + integer :: niglobal !< The total horizontal i-domain size. + integer :: njglobal !< The total horizontal j-domain size. + integer :: nihalo !< The i-halo size in memory. + integer :: njhalo !< The j-halo size in memory. + logical :: symmetric !< True if symmetric memory is used with this domain. + logical :: nonblocking_updates !< If true, non-blocking halo updates are + !! allowed. The default is .false. (for now). + logical :: thin_halo_updates !< If true, optional arguments may be used to + !! specify the width of the halos that are + !! updated with each call. + integer :: layout(2) !< This domain's processor layout. This is + !! saved to enable the construction of related + !! new domains with different resolutions or + !! other properties. + integer :: io_layout(2) !< The IO-layout used with this domain. + integer :: X_FLAGS !< Flag that specifies the properties of the + !! domain in the i-direction in a define_domain call. + integer :: Y_FLAGS !< Flag that specifies the properties of the + !! domain in the j-direction in a define_domain call. + logical, pointer :: maskmap(:,:) => NULL() !< A pointer to an array indicating + !! which logical processors are actually used for + !! the ocean code. The other logical processors + !! would be contain only land points and are not + !! assigned to actual processors. This need not be + !! assigned if all logical processors are used. +end type MOM_domain_type + +integer, parameter :: To_All = To_East + To_West + To_North + To_South !< A flag for passing in all directions + +contains + +!> pass_var_3d does a halo update for a three-dimensional array. 
+subroutine pass_var_3d(array, MOM_dom, sideflag, complete, position, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! sothe halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_3d + +!> pass_var_2d does a halo update for a two-dimensional array. +subroutine pass_var_2d(array, MOM_dom, sideflag, complete, position, halo, inner_halo, clock) + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full halo + !! by default. + integer, optional, intent(in) :: inner_halo !< The size of an inner halo to avoid updating, + !! or 0 to avoid updating symmetric memory + !! computational domain points. Setting this >=0 + !! also enforces that complete=.true. 
+ integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + real, allocatable, dimension(:,:) :: tmp + integer :: pos, i_halo, j_halo + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB + integer :: inner, i, j, isfw, iefw, isfe, iefe, jsfs, jefs, jsfn, jefn + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + block_til_complete = .true. ; if (present(complete)) block_til_complete = complete + pos = CENTER ; if (present(position)) pos = position + + if (present(inner_halo)) then ; if (inner_halo >= 0) then + ! Store the original values. + allocate(tmp(size(array,1), size(array,2))) + tmp(:,:) = array(:,:) + block_til_complete = .true. + endif ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & + complete=block_til_complete, position=position) + endif + + if (present(inner_halo)) then ; if (inner_halo >= 0) then + call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed) + ! Convert to local indices for arrays starting at 1. + isc = isc - (isd-1) ; iec = iec - (isd-1) ; ied = ied - (isd-1) ; isd = 1 + jsc = jsc - (jsd-1) ; jec = jec - (jsd-1) ; jed = jed - (jsd-1) ; jsd = 1 + i_halo = min(inner_halo, isc-1) ; j_halo = min(inner_halo, jsc-1) + + ! Figure out the array index extents of the eastern, western, northern and southern regions to copy. 
+ if (pos == CENTER) then + if (size(array,1) == ied) then + isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CENTER array.") ; endif + if (size(array,2) == jed) then + isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CENTER array.") ; endif + elseif (pos == CORNER) then + if (size(array,1) == ied) then + isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + elseif (size(array,1) == ied+1) then + isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1) + else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CORNER array.") ; endif + if (size(array,2) == jed) then + jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo + elseif (size(array,2) == jed+1) then + jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1) + else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CORNER array.") ; endif + elseif (pos == NORTH_FACE) then + if (size(array,1) == ied) then + isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for NORTH_FACE array.") ; endif + if (size(array,2) == jed) then + jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo + elseif (size(array,2) == jed+1) then + jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1) + else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for NORTH_FACE array.") ; endif + elseif (pos == EAST_FACE) then + if (size(array,1) == ied) then + isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + elseif (size(array,1) == ied+1) then + isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1) + else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for 
EAST_FACE array.") ; endif + if (size(array,2) == jed) then + isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo + else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for EAST_FACE array.") ; endif + else + call MOM_error(FATAL, "pass_var_2d: Unrecognized position") + endif + + ! Copy back the stored inner halo points + do j=jsfs,jefn ; do i=isfw,iefw ; array(i,j) = tmp(i,j) ; enddo ; enddo + do j=jsfs,jefn ; do i=isfe,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo + do j=jsfs,jefs ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo + do j=jsfn,jefn ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo + + deallocate(tmp) + endif ; endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_2d + +!> pass_var_start_2d starts a halo update for a two-dimensional array. +function pass_var_start_2d(array, MOM_dom, sideflag, position, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. 
+ integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_var_start_2d !0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_var_start_2d + +!> pass_var_start_3d starts a halo update for a three-dimensional array. +function pass_var_start_3d(array, MOM_dom, sideflag, position, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! 
halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_var_start_3d !< The integer index for this update. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_var_start_3d + +!> pass_var_complete_2d completes a halo update for a two-dimensional array. +subroutine pass_var_complete_2d(id_update, array, MOM_dom, sideflag, position, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has + !! been returned from a previous call to + !! pass_var_start. + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! 
so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_complete_2d + +!> pass_var_complete_3d completes a halo update for a three-dimensional array. +subroutine pass_var_complete_3d(id_update, array, MOM_dom, sideflag, position, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has + !! been returned from a previous call to + !! pass_var_start. + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. 
For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_complete_3d + +!> pass_vector_2d does a halo update for a pair of two-dimensional arrays +!! representing the compontents of a two-dimensional horizontal vector. +subroutine pass_vector_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. 
+ integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_2d + +!> fill_vector_symmetric_edges_2d does an usual set of halo updates that only +!! fill in the values at the edge of a pair of symmetric memory two-dimensional +!! arrays representing the compontents of a two-dimensional horizontal vector. +!! If symmetric memory is not being used, this subroutine does nothing except to +!! possibly turn optional cpu clocks on or off. +subroutine fill_vector_symmetric_edges_2d(u_cmpt, v_cmpt, MOM_dom, stagger, scalar, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: scalar !< An optional argument indicating whether. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! 
Local variables + integer :: stagger_local + integer :: dirflag + integer :: i, j, isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB + real, allocatable, dimension(:) :: sbuff_x, sbuff_y, wbuff_x, wbuff_y + logical :: block_til_complete + + if (.not. MOM_dom%symmetric) then + return + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + if (.not.(stagger_local == CGRID_NE .or. stagger_local == BGRID_NE)) return + + call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed) + + ! Adjust isc, etc., to account for the fact that the input arrays indices all + ! start at 1 (and are effectively on a SW grid!). + isc = isc - (isd-1) ; iec = iec - (isd-1) + jsc = jsc - (jsd-1) ; jec = jec - (jsd-1) + IscB = isc ; IecB = iec+1 ; JscB = jsc ; JecB = jec+1 + + dirflag = To_All ! 
60 + if (present(scalar)) then ; if (scalar) dirflag = To_All+SCALAR_PAIR ; endif + + if (stagger_local == CGRID_NE) then + allocate(wbuff_x(jsc:jec)) ; allocate(sbuff_y(isc:iec)) + wbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 + call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + wbufferx=wbuff_x, sbuffery=sbuff_y, & + gridtype=CGRID_NE) + do i=isc,iec + v_cmpt(i,JscB) = sbuff_y(i) + enddo + do j=jsc,jec + u_cmpt(IscB,j) = wbuff_x(j) + enddo + deallocate(wbuff_x) ; deallocate(sbuff_y) + elseif (stagger_local == BGRID_NE) then + allocate(wbuff_x(JscB:JecB)) ; allocate(sbuff_x(IscB:IecB)) + allocate(wbuff_y(JscB:JecB)) ; allocate(sbuff_y(IscB:IecB)) + wbuff_x(:) = 0.0 ; wbuff_y(:) = 0.0 ; sbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 + call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + wbufferx=wbuff_x, sbufferx=sbuff_x, & + wbuffery=wbuff_y, sbuffery=sbuff_y, & + gridtype=BGRID_NE) + do I=IscB,IecB + u_cmpt(I,JscB) = sbuff_x(I) ; v_cmpt(I,JscB) = sbuff_y(I) + enddo + do J=JscB,JecB + u_cmpt(IscB,J) = wbuff_x(J) ; v_cmpt(IscB,J) = wbuff_y(J) + enddo + deallocate(wbuff_x) ; deallocate(sbuff_x) + deallocate(wbuff_y) ; deallocate(sbuff_y) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine fill_vector_symmetric_edges_2d + +!> pass_vector_3d does a halo update for a pair of three-dimensional arrays +!! representing the compontents of a three-dimensional horizontal vector. +subroutine pass_vector_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! 
needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_3d + +!> pass_vector_start_2d starts a halo update for a pair of two-dimensional arrays +!! representing the compontents of a two-dimensional horizontal vector. +function pass_vector_start_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. 
+ logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_vector_start_2d !< The integer index for this + !! update. + + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_vector_start_2d + +!> pass_vector_start_3d starts a halo update for a pair of three-dimensional arrays +!! representing the compontents of a three-dimensional horizontal vector. +function pass_vector_start_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. 
+ type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_vector_start_3d !< The integer index for this + !! update. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_vector_start_3d + +!> pass_vector_complete_2d completes a halo update for a pair of two-dimensional arrays +!! representing the compontents of a two-dimensional horizontal vector. +subroutine pass_vector_complete_2d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has been + !! returned from a previous call to + !! pass_var_start. + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! 
BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_complete_2d + +!> pass_vector_complete_3d completes a halo update for a pair of three-dimensional +!! arrays representing the compontents of a three-dimensional horizontal vector. +subroutine pass_vector_complete_3d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has been + !! returned from a previous call to + !! pass_var_start. + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. 
+ type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_complete_3d + +!> create_var_group_pass_2d sets up a group of two-dimensional array halo updates. +subroutine create_var_group_pass_2d(group, array, MOM_dom, sideflag, position, & + halo, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! 
Local variables + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,array) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_var_group_pass_2d + +!> create_var_group_pass_3d sets up a group of three-dimensional array halo updates. +subroutine create_var_group_pass_3d(group, array, MOM_dom, sideflag, position, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. 
+ integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,array) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & + position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_var_group_pass_3d + +!> create_vector_group_pass_2d sets up a group of two-dimensional vector halo updates. +subroutine create_vector_group_pass_2d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. 
It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,u_cmpt, v_cmpt) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_vector_group_pass_2d + +!> create_vector_group_pass_3d sets up a group of three-dimensional vector halo updates. 
+subroutine create_vector_group_pass_3d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! 
Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (mpp_group_update_initialized(group)) then + call mpp_reset_group_update_field(group,u_cmpt, v_cmpt) + elseif (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, & + shalo=halo, nhalo=halo) + else + call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & + flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine create_vector_group_pass_3d + +!> do_group_pass carries out a group halo update. +subroutine do_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_do_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine do_group_pass + +!> start_group_pass starts out a group halo update. +subroutine start_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! 
needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_start_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine start_group_pass + +!> complete_group_pass completes a group halo update. +subroutine complete_group_pass(group, MOM_dom, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + real :: d_type + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + call mpp_complete_group_update(group, MOM_dom%mpp_domain, d_type) + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine complete_group_pass + + +!> Pass a 2-D array from one MOM domain to another +subroutine redistribute_array_2d(Domain1, array1, Domain2, array2, complete) + type(domain2d), & + intent(in) :: Domain1 !< The MOM domain from which to extract information. + real, dimension(:,:), intent(in) :: array1 !< The array from which to extract information. + type(domain2d), & + intent(in) :: Domain2 !< The MOM domain receiving information. + real, dimension(:,:), intent(out) :: array2 !< The array receiving information. + logical, optional, intent(in) :: complete !< If true, finish communication before proceeding. + + ! 
Local variables + logical :: do_complete + + do_complete=.true.;if (PRESENT(complete)) do_complete = complete + + call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete) + +end subroutine redistribute_array_2d + +!> Pass a 3-D array from one MOM domain to another +subroutine redistribute_array_3d(Domain1, array1, Domain2, array2, complete) + type(domain2d), & + intent(in) :: Domain1 !< The MOM domain from which to extract information. + real, dimension(:,:,:), intent(in) :: array1 !< The array from which to extract information. + type(domain2d), & + intent(in) :: Domain2 !< The MOM domain receiving information. + real, dimension(:,:,:), intent(out) :: array2 !< The array receiving information. + logical, optional, intent(in) :: complete !< If true, finish communication before proceeding. + + ! Local variables + logical :: do_complete + + do_complete=.true.;if (PRESENT(complete)) do_complete = complete + + call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete) + +end subroutine redistribute_array_3d + + +!> create_MOM_domain creates and initializes a MOM_domain_type variables, based on the information +!! provided in arguments. +subroutine create_MOM_domain(MOM_dom, n_global, n_halo, reentrant, tripolar_N, layout, io_layout, & + domain_name, mask_table, symmetric, thin_halos, nonblocking) + type(MOM_domain_type), pointer :: MOM_dom !< A pointer to the MOM_domain_type being defined here. + integer, dimension(2), intent(in) :: n_global !< The number of points on the global grid in + !! the i- and j-directions + integer, dimension(2), intent(in) :: n_halo !< The number of halo points on each processor + logical, dimension(2), intent(in) :: reentrant !< If true the grid is periodic in the i- and j- directions + logical, intent(in) :: tripolar_N !< If true the grid uses northern tripolar connectivity + integer, dimension(2), intent(in) :: layout !< The layout of logical PEs in the i- and j-directions. 
+ integer, dimension(2), optional, intent(in) :: io_layout !< The layout for parallel input and output. + character(len=*), optional, intent(in) :: domain_name !< A name for this domain, "MOM" if missing. + character(len=*), optional, intent(in) :: mask_table !< The full relative or absolute path to the mask table. + logical, optional, intent(in) :: symmetric !< If present, this specifies whether this domain + !! uses symmetric memory, or true if missing. + logical, optional, intent(in) :: thin_halos !< If present, this specifies whether to permit the use of + !! thin halo updates, or true if missing. + logical, optional, intent(in) :: nonblocking !< If present, this specifies whether to permit the use of + !! nonblocking halo updates, or false if missing. + + ! local variables + integer, dimension(4) :: global_indices ! The lower and upper global i- and j-index bounds + integer :: X_FLAGS ! A combination of integers encoding the x-direction grid connectivity. + integer :: Y_FLAGS ! A combination of integers encoding the y-direction grid connectivity. + integer :: xhalo_d2, yhalo_d2 + character(len=200) :: mesg ! A string for use in error messages + logical :: mask_table_exists ! Mask_table is present and the file it points to exists + + if (.not.associated(MOM_dom)) then + allocate(MOM_dom) + allocate(MOM_dom%mpp_domain) + allocate(MOM_dom%mpp_domain_d2) + endif + + MOM_dom%name = "MOM" ; if (present(domain_name)) MOM_dom%name = trim(domain_name) + + X_FLAGS = 0 ; Y_FLAGS = 0 + if (reentrant(1)) X_FLAGS = CYCLIC_GLOBAL_DOMAIN + if (reentrant(2)) Y_FLAGS = CYCLIC_GLOBAL_DOMAIN + if (tripolar_N) then + Y_FLAGS = FOLD_NORTH_EDGE + if (reentrant(2)) call MOM_error(FATAL,"MOM_domains: "// & + "TRIPOLAR_N and REENTRANT_Y may not be used together.") + endif + + MOM_dom%nonblocking_updates = nonblocking + MOM_dom%thin_halo_updates = thin_halos + MOM_dom%symmetric = .true. 
; if (present(symmetric)) MOM_dom%symmetric = symmetric + MOM_dom%niglobal = n_global(1) ; MOM_dom%njglobal = n_global(2) + MOM_dom%nihalo = n_halo(1) ; MOM_dom%njhalo = n_halo(2) + + ! Save the extra data for creating other domains of different resolution that overlay this domain. + MOM_dom%X_FLAGS = X_FLAGS + MOM_dom%Y_FLAGS = Y_FLAGS + MOM_dom%layout(:) = layout(:) + + ! Set up the io_layout, with error handling. + MOM_dom%io_layout(:) = (/ 1, 1 /) + if (present(io_layout)) then + if (io_layout(1) == 0) then + MOM_dom%io_layout(1) = layout(1) + elseif (io_layout(1) > 1) then + MOM_dom%io_layout(1) = io_layout(1) + if (modulo(layout(1), io_layout(1)) /= 0) then + write(mesg,'("MOM_domains_init: The i-direction I/O-layout, IO_LAYOUT(1)=",i4, & + &", does not evenly divide the i-direction layout, NIPROC=,",i4,".")') io_layout(1), layout(1) + call MOM_error(FATAL, mesg) + endif + endif + + if (io_layout(2) == 0) then + MOM_dom%io_layout(2) = layout(2) + elseif (io_layout(2) > 1) then + MOM_dom%io_layout(2) = io_layout(2) + if (modulo(layout(2), io_layout(2)) /= 0) then + write(mesg,'("MOM_domains_init: The j-direction I/O-layout, IO_LAYOUT(2)=",i4, & + &", does not evenly divide the j-direction layout, NJPROC=,",i4,".")') io_layout(2), layout(2) + call MOM_error(FATAL, mesg) + endif + endif + endif + + if (present(mask_table)) then + mask_table_exists = file_exist(mask_table) + if (mask_table_exists) then + allocate(MOM_dom%maskmap(layout(1), layout(2))) + call parse_mask_table(mask_table, MOM_dom%maskmap, MOM_dom%name) + endif + else + mask_table_exists = .false. + endif + + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain) + + !For downsampled domain, recommend a halo of 1 (or 0?) since we're not doing wide-stencil computations. + !But that does not work because the downsampled field would not have the correct size to pass the checks, e.g., we get + !error: downsample_diag_indices_get: peculiar size 28 in i-direction\ndoes not match one of 24 25 26 27 + ! 
call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, halo_size=(MOM_dom%nihalo/2), coarsen=2) + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, coarsen=2) + +end subroutine create_MOM_domain + +!> dealloc_MOM_domain deallocates memory associated with a pointer to a MOM_domain_type +!! and potentially all of its contents +subroutine deallocate_MOM_domain(MOM_domain, cursory) + type(MOM_domain_type), pointer :: MOM_domain !< A pointer to the MOM_domain_type being deallocated + logical, optional, intent(in) :: cursory !< If true do not deallocate fields associated + !! with the underlying infrastructure + logical :: invasive ! If true, deallocate fields associated with the underlying infrastructure + + invasive = .true. ; if (present(cursory)) invasive = .not.cursory + + if (associated(MOM_domain)) then + if (associated(MOM_domain%mpp_domain)) then + if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain) + deallocate(MOM_domain%mpp_domain) + endif + if (associated(MOM_domain%mpp_domain_d2)) then + if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain_d2) + deallocate(MOM_domain%mpp_domain_d2) + endif + if (associated(MOM_domain%maskmap)) deallocate(MOM_domain%maskmap) + deallocate(MOM_domain) + endif + +end subroutine deallocate_MOM_domain + +!> MOM_thread_affinity_set returns true if the number of openMP threads have been set to a value greater than 1. +function MOM_thread_affinity_set() + ! Local variables + !$ integer :: ocean_nthreads ! Number of openMP threads + !$ integer :: omp_get_num_threads ! An openMP function that returns the number of threads + logical :: MOM_thread_affinity_set + + MOM_thread_affinity_set = .false. 
+ !$ call fms_affinity_init() + !$OMP PARALLEL + !$OMP MASTER + !$ ocean_nthreads = omp_get_num_threads() + !$OMP END MASTER + !$OMP END PARALLEL + !$ MOM_thread_affinity_set = (ocean_nthreads > 1 ) +end function MOM_thread_affinity_set + +!> set_MOM_thread_affinity sets the number of openMP threads to use with the ocean. +subroutine set_MOM_thread_affinity(ocean_nthreads, ocean_hyper_thread) + integer, intent(in) :: ocean_nthreads !< Number of openMP threads to use for the ocean model + logical, intent(in) :: ocean_hyper_thread !< If true, use hyper threading + + ! Local variables + !$ integer :: omp_get_thread_num, omp_get_num_threads !< These are the results of openMP functions + + !$ call fms_affinity_set('OCEAN', ocean_hyper_thread, ocean_nthreads) + !$ call omp_set_num_threads(ocean_nthreads) + !$ write(6,*) "MOM_domains_mod OMPthreading ", fms_affinity_get(), omp_get_thread_num(), omp_get_num_threads() + !$ flush(6) +end subroutine set_MOM_thread_affinity + +!> This subroutine retrieves the 1-d domains that make up the 2d-domain in a MOM_domain +subroutine get_domain_components_MD(MOM_dom, x_domain, y_domain) + type(MOM_domain_type), intent(in) :: MOM_dom !< The MOM_domain whose contents are being extracted + type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain + type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain + + call mpp_get_domain_components(MOM_dom%mpp_domain, x_domain, y_domain) +end subroutine get_domain_components_MD + +!> This subroutine retrieves the 1-d domains that make up a 2d-domain +subroutine get_domain_components_d2D(domain, x_domain, y_domain) + type(domain2D), intent(in) :: domain !< The 2D domain whose contents are being extracted + type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain + type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain + + call mpp_get_domain_components(domain, x_domain, y_domain) +end subroutine 
get_domain_components_d2D + +!> clone_MD_to_MD copies one MOM_domain_type into another, while allowing +!! some properties of the new type to differ from the original one. +subroutine clone_MD_to_MD(MD_in, MOM_dom, min_halo, halo_size, symmetric, domain_name, & + turns, refine, extra_halo) + type(MOM_domain_type), intent(in) :: MD_in !< An existing MOM_domain + type(MOM_domain_type), pointer :: MOM_dom !< A pointer to a MOM_domain that will be + !! allocated if it is unassociated, and will have data + !! copied from MD_in + integer, dimension(2), & + optional, intent(inout) :: min_halo !< If present, this sets the + !! minimum halo size for this domain in the i- and j- + !! directions, and returns the actual halo size used. + integer, optional, intent(in) :: halo_size !< If present, this sets the halo + !! size for the domain in the i- and j-directions. + !! min_halo and halo_size can not both be present. + logical, optional, intent(in) :: symmetric !< If present, this specifies + !! whether the new domain is symmetric, regardless of + !! whether the macro SYMMETRIC_MEMORY_ is defined. + character(len=*), & + optional, intent(in) :: domain_name !< A name for the new domain, copied + !! from MD_in if missing. + integer, optional, intent(in) :: turns !< Number of quarter turns + integer, optional, intent(in) :: refine !< A factor by which to enhance the grid resolution. + integer, optional, intent(in) :: extra_halo !< An extra number of points in the halos + !! compared with MD_in + + integer :: global_indices(4) + logical :: mask_table_exists + integer, dimension(:), allocatable :: exni ! The extents of the grid for each i-row of the layout. + ! The sum of exni must equal MOM_dom%niglobal. + integer, dimension(:), allocatable :: exnj ! The extents of the grid for each j-row of the layout. + ! The sum of exnj must equal MOM_dom%njglobal. + integer :: qturns ! The number of quarter turns, restricted to the range of 0 to 3. 
+ integer :: i, j, nl1, nl2 + + qturns = 0 + if (present(turns)) qturns = modulo(turns, 4) + + if (.not.associated(MOM_dom)) then + allocate(MOM_dom) + allocate(MOM_dom%mpp_domain) + allocate(MOM_dom%mpp_domain_d2) + endif + +! Save the extra data for creating other domains of different resolution that overlay this domain + MOM_dom%symmetric = MD_in%symmetric + MOM_dom%nonblocking_updates = MD_in%nonblocking_updates + MOM_dom%thin_halo_updates = MD_in%thin_halo_updates + + if (modulo(qturns, 2) /= 0) then + MOM_dom%niglobal = MD_in%njglobal ; MOM_dom%njglobal = MD_in%niglobal + MOM_dom%nihalo = MD_in%njhalo ; MOM_dom%njhalo = MD_in%nihalo + call get_layout_extents(MD_in, exnj, exni) + + MOM_dom%X_FLAGS = MD_in%Y_FLAGS ; MOM_dom%Y_FLAGS = MD_in%X_FLAGS + MOM_dom%layout(:) = MD_in%layout(2:1:-1) + MOM_dom%io_layout(:) = MD_in%io_layout(2:1:-1) + else + MOM_dom%niglobal = MD_in%niglobal ; MOM_dom%njglobal = MD_in%njglobal + MOM_dom%nihalo = MD_in%nihalo ; MOM_dom%njhalo = MD_in%njhalo + call get_layout_extents(MD_in, exni, exnj) + + MOM_dom%X_FLAGS = MD_in%X_FLAGS ; MOM_dom%Y_FLAGS = MD_in%Y_FLAGS + MOM_dom%layout(:) = MD_in%layout(:) + MOM_dom%io_layout(:) = MD_in%io_layout(:) + endif + + ! Ensure that the points per processor are the same on the source and destination grids. + select case (qturns) + case (1) ; call invert(exni) + case (2) ; call invert(exni) ; call invert(exnj) + case (3) ; call invert(exnj) + end select + + if (associated(MD_in%maskmap)) then + mask_table_exists = .true. 
+ allocate(MOM_dom%maskmap(MOM_dom%layout(1), MOM_dom%layout(2))) + + nl1 = MOM_dom%layout(1) ; nl2 = MOM_dom%layout(2) + select case (qturns) + case (0) + do j=1,nl2 ; do i=1,nl1 + MOM_dom%maskmap(i,j) = MD_in%maskmap(i, j) + enddo ; enddo + case (1) + do j=1,nl2 ; do i=1,nl1 + MOM_dom%maskmap(i,j) = MD_in%maskmap(j, nl1+1-i) + enddo ; enddo + case (2) + do j=1,nl2 ; do i=1,nl1 + MOM_dom%maskmap(i,j) = MD_in%maskmap(nl1+1-i, nl2+1-j) + enddo ; enddo + case (3) + do j=1,nl2 ; do i=1,nl1 + MOM_dom%maskmap(i,j) = MD_in%maskmap(nl2+1-j, i) + enddo ; enddo + end select + else + mask_table_exists = .false. + endif + + ! Optionally enhance the grid resolution. + if (present(refine)) then ; if (refine > 1) then + MOM_dom%niglobal = refine*MOM_dom%niglobal ; MOM_dom%njglobal = refine*MOM_dom%njglobal + MOM_dom%nihalo = refine*MOM_dom%nihalo ; MOM_dom%njhalo = refine*MOM_dom%njhalo + do i=1,MOM_dom%layout(1) ; exni(i) = refine*exni(i) ; enddo + do j=1,MOM_dom%layout(2) ; exnj(j) = refine*exnj(j) ; enddo + endif ; endif + + ! Optionally enhance the grid resolution. + if (present(extra_halo)) then ; if (extra_halo > 0) then + MOM_dom%nihalo = MOM_dom%nihalo + extra_halo ; MOM_dom%njhalo = MOM_dom%njhalo + extra_halo + endif ; endif + + if (present(halo_size) .and. 
present(min_halo)) call MOM_error(FATAL, & + "clone_MOM_domain can not have both halo_size and min_halo present.") + + if (present(min_halo)) then + MOM_dom%nihalo = max(MOM_dom%nihalo, min_halo(1)) + min_halo(1) = MOM_dom%nihalo + MOM_dom%njhalo = max(MOM_dom%njhalo, min_halo(2)) + min_halo(2) = MOM_dom%njhalo + endif + + if (present(halo_size)) then + MOM_dom%nihalo = halo_size ; MOM_dom%njhalo = halo_size + endif + + if (present(symmetric)) then ; MOM_dom%symmetric = symmetric ; endif + + if (present(domain_name)) then + MOM_dom%name = trim(domain_name) + else + MOM_dom%name = MD_in%name + endif + + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain, xextent=exni, yextent=exnj) + + call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, domain_name=MOM_dom%name, coarsen=2) + +end subroutine clone_MD_to_MD + + +!> clone_MD_to_d2D uses information from a MOM_domain_type to create a new +!! domain2d type, while allowing some properties of the new type to differ from +!! the original one. +subroutine clone_MD_to_d2D(MD_in, mpp_domain, min_halo, halo_size, symmetric, & + domain_name, turns, xextent, yextent, coarsen) + type(MOM_domain_type), intent(in) :: MD_in !< An existing MOM_domain to be cloned + type(domain2d), intent(inout) :: mpp_domain !< The new mpp_domain to be set up + integer, dimension(2), & + optional, intent(inout) :: min_halo !< If present, this sets the + !! minimum halo size for this domain in the i- and j- + !! directions, and returns the actual halo size used. + integer, optional, intent(in) :: halo_size !< If present, this sets the halo + !! size for the domain in the i- and j-directions. + !! min_halo and halo_size can not both be present. + logical, optional, intent(in) :: symmetric !< If present, this specifies + !! whether the new domain is symmetric, regardless of + !! whether the macro SYMMETRIC_MEMORY_ is defined or + !! whether MD_in is symmetric. 
+ character(len=*), & + optional, intent(in) :: domain_name !< A name for the new domain, "MOM" + !! if missing. + integer, optional, intent(in) :: turns !< Number of quarter turns - not implemented here. + integer, optional, intent(in) :: coarsen !< A factor by which to coarsen this grid. + !! The default of 1 is for no coarsening. + integer, dimension(:), optional, intent(in) :: xextent !< The number of grid points in the + !! tracer computational domain for division of the x-layout. + integer, dimension(:), optional, intent(in) :: yextent !< The number of grid points in the + !! tracer computational domain for division of the y-layout. + + integer :: global_indices(4) + integer :: nihalo, njhalo + logical :: symmetric_dom, do_coarsen + character(len=64) :: dom_name + + if (present(turns)) & + call MOM_error(FATAL, "Rotation not supported for MOM_domain to domain2d") + + if (present(halo_size) .and. present(min_halo)) call MOM_error(FATAL, & + "clone_MOM_domain can not have both halo_size and min_halo present.") + + do_coarsen = .false. 
; if (present(coarsen)) then ; do_coarsen = (coarsen > 1) ; endif + + nihalo = MD_in%nihalo ; njhalo = MD_in%njhalo + if (do_coarsen) then + nihalo = int(MD_in%nihalo / coarsen) ; njhalo = int(MD_in%njhalo / coarsen) + endif + + if (present(min_halo)) then + nihalo = max(nihalo, min_halo(1)) + njhalo = max(njhalo, min_halo(2)) + min_halo(1) = nihalo ; min_halo(2) = njhalo + endif + if (present(halo_size)) then + nihalo = halo_size ; njhalo = halo_size + endif + + symmetric_dom = MD_in%symmetric + if (present(symmetric)) then ; symmetric_dom = symmetric ; endif + + dom_name = MD_in%name + if (do_coarsen) dom_name = trim(MD_in%name)//"c" + if (present(domain_name)) dom_name = trim(domain_name) + + global_indices(1:4) = (/ 1, MD_in%niglobal, 1, MD_in%njglobal /) + if (do_coarsen) then + global_indices(1:4) = (/ 1, (MD_in%niglobal/coarsen), 1, (MD_in%njglobal/coarsen) /) + endif + + if (associated(MD_in%maskmap)) then + call MOM_define_domain( global_indices, MD_in%layout, mpp_domain, & + xflags=MD_in%X_FLAGS, yflags=MD_in%Y_FLAGS, xhalo=nihalo, yhalo=njhalo, & + xextent=xextent, yextent=yextent, symmetry=symmetric_dom, name=dom_name, & + maskmap=MD_in%maskmap ) + else + call MOM_define_domain( global_indices, MD_in%layout, mpp_domain, & + xflags=MD_in%X_FLAGS, yflags=MD_in%Y_FLAGS, xhalo=nihalo, yhalo=njhalo, & + symmetry=symmetric_dom, xextent=xextent, yextent=yextent, name=dom_name) + endif + + if ((MD_in%io_layout(1) + MD_in%io_layout(2) > 0) .and. 
& + (MD_in%layout(1)*MD_in%layout(2) > 1)) then + call MOM_define_io_domain(mpp_domain, MD_in%io_layout) + endif + +end subroutine clone_MD_to_d2D + +!> Returns various data that has been stored in a MOM_domain_type +subroutine get_domain_extent(Domain, isc, iec, jsc, jec, isd, ied, jsd, jed, & + isg, ieg, jsg, jeg, idg_offset, jdg_offset, & + symmetric, local_indexing, index_offset, coarsen) + type(MOM_domain_type), & + intent(in) :: Domain !< The MOM domain from which to extract information + integer, intent(out) :: isc !< The start i-index of the computational domain + integer, intent(out) :: iec !< The end i-index of the computational domain + integer, intent(out) :: jsc !< The start j-index of the computational domain + integer, intent(out) :: jec !< The end j-index of the computational domain + integer, intent(out) :: isd !< The start i-index of the data domain + integer, intent(out) :: ied !< The end i-index of the data domain + integer, intent(out) :: jsd !< The start j-index of the data domain + integer, intent(out) :: jed !< The end j-index of the data domain + integer, intent(out) :: isg !< The start i-index of the global domain + integer, intent(out) :: ieg !< The end i-index of the global domain + integer, intent(out) :: jsg !< The start j-index of the global domain + integer, intent(out) :: jeg !< The end j-index of the global domain + integer, optional, intent(out) :: idg_offset !< The offset between the corresponding global and + !! data i-index spaces. + integer, optional, intent(out) :: jdg_offset !< The offset between the corresponding global and + !! data j-index spaces. + logical, optional, intent(out) :: symmetric !< True if symmetric memory is used. + logical, optional, intent(in) :: local_indexing !< If true, local tracer array indices start at 1, + !! as in most MOM6 code. + integer, optional, intent(in) :: index_offset !< A fixed additional offset to all indices. This + !! can be useful for some types of debugging with + !! 
dynamic memory allocation. + integer, optional, intent(in) :: coarsen !< A factor by which the grid is coarsened. + !! The default is 1, for no coarsening. + + ! Local variables + integer :: ind_off, idg_off, jdg_off, coarsen_lev + logical :: local + + local = .true. ; if (present(local_indexing)) local = local_indexing + ind_off = 0 ; if (present(index_offset)) ind_off = index_offset + + coarsen_lev = 1 ; if (present(coarsen)) coarsen_lev = coarsen + + if (coarsen_lev == 1) then + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + call mpp_get_global_domain(Domain%mpp_domain, isg, ieg, jsg, jeg) + elseif (coarsen_lev == 2) then + if (.not.associated(Domain%mpp_domain_d2)) call MOM_error(FATAL, & + "get_domain_extent called with coarsen=2, but Domain%mpp_domain_d2 is not associated.") + call mpp_get_compute_domain(Domain%mpp_domain_d2, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain_d2, isd, ied, jsd, jed) + call mpp_get_global_domain(Domain%mpp_domain_d2, isg, ieg, jsg, jeg) + else + call MOM_error(FATAL, "get_domain_extent called with an unsupported level of coarsening.") + endif + + if (local) then + ! This code institutes the MOM convention that local array indices start at 1. 
+ idg_off = isd-1 ; jdg_off = jsd-1 + isc = isc-isd+1 ; iec = iec-isd+1 ; jsc = jsc-jsd+1 ; jec = jec-jsd+1 + ied = ied-isd+1 ; jed = jed-jsd+1 + isd = 1 ; jsd = 1 + else + idg_off = 0 ; jdg_off = 0 + endif + if (ind_off /= 0) then + idg_off = idg_off + ind_off ; jdg_off = jdg_off + ind_off + isc = isc + ind_off ; iec = iec + ind_off + jsc = jsc + ind_off ; jec = jec + ind_off + isd = isd + ind_off ; ied = ied + ind_off + jsd = jsd + ind_off ; jed = jed + ind_off + endif + if (present(idg_offset)) idg_offset = idg_off + if (present(jdg_offset)) jdg_offset = jdg_off + if (present(symmetric)) symmetric = Domain%symmetric + +end subroutine get_domain_extent + +!> Return the (potentially symmetric) computational domain i-bounds for an array +!! passed without index specifications (i.e. indices start at 1) based on an array size. +subroutine get_simple_array_i_ind(domain, size, is, ie, symmetric) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, intent(in) :: size !< The i-array size + integer, intent(out) :: is !< The computational domain starting i-index. + integer, intent(out) :: ie !< The computational domain ending i-index. + logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes + !! can be considered. + ! Local variables + logical :: sym + character(len=120) :: mesg, mesg2 + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed + + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + + isc = isc-isd+1 ; iec = iec-isd+1 ; ied = ied-isd+1 ; isd = 1 + sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric + + if (size == ied) then ; is = isc ; ie = iec + elseif (size == 1+iec-isc) then ; is = 1 ; ie = size + elseif (sym .and. (size == 1+ied)) then ; is = isc ; ie = iec+1 + elseif (sym .and. 
(size == 2+iec-isc)) then ; is = 1 ; ie = size+1 + else + write(mesg,'("Unrecognized size ", i6, "in call to get_simple_array_i_ind. \")') size + if (sym) then + write(mesg2,'("Valid sizes are : ", 2i7)') ied, 1+iec-isc + else + write(mesg2,'("Valid sizes are : ", 4i7)') ied, 1+iec-isc, 1+ied, 2+iec-isc + endif + call MOM_error(FATAL, trim(mesg)//trim(mesg2)) + endif + +end subroutine get_simple_array_i_ind + + +!> Return the (potentially symmetric) computational domain j-bounds for an array +!! passed without index specifications (i.e. indices start at 1) based on an array size. +subroutine get_simple_array_j_ind(domain, size, js, je, symmetric) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, intent(in) :: size !< The j-array size + integer, intent(out) :: js !< The computational domain starting j-index. + integer, intent(out) :: je !< The computational domain ending j-index. + logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes + !! can be considered. + ! Local variables + logical :: sym + character(len=120) :: mesg, mesg2 + integer :: isc, iec, jsc, jec, isd, ied, jsd, jed + + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + + jsc = jsc-jsd+1 ; jec = jec-jsd+1 ; jed = jed-jsd+1 ; jsd = 1 + sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric + + if (size == jed) then ; js = jsc ; je = jec + elseif (size == 1+jec-jsc) then ; js = 1 ; je = size + elseif (sym .and. (size == 1+jed)) then ; js = jsc ; je = jec+1 + elseif (sym .and. (size == 2+jec-jsc)) then ; js = 1 ; je = size+1 + else + write(mesg,'("Unrecognized size ", i6, "in call to get_simple_array_j_ind. 
\")') size + if (sym) then + write(mesg2,'("Valid sizes are : ", 2i7)') jed, 1+jec-jsc + else + write(mesg2,'("Valid sizes are : ", 4i7)') jed, 1+jec-jsc, 1+jed, 2+jec-jsc + endif + call MOM_error(FATAL, trim(mesg)//trim(mesg2)) + endif + +end subroutine get_simple_array_j_ind + +!> Invert the contents of a 1-d array +subroutine invert(array) + integer, dimension(:), intent(inout) :: array !< The 1-d array to invert + integer :: i, ni, swap + ni = size(array) + do i=1,ni + swap = array(i) + array(i) = array(ni+1-i) + array(ni+1-i) = swap + enddo +end subroutine invert + +!> Returns the global shape of h-point arrays +subroutine get_global_shape(domain, niglobal, njglobal) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, intent(out) :: niglobal !< i-index global size of h-point arrays + integer, intent(out) :: njglobal !< j-index global size of h-point arrays + + niglobal = domain%niglobal + njglobal = domain%njglobal +end subroutine get_global_shape + +!> Returns arrays of the i- and j- sizes of the h-point computational domains for each +!! element of the grid layout. Any input values in the extent arrays are discarded, so +!! they are effectively intent out despite their declared intent of inout. +subroutine get_layout_extents(Domain, extent_i, extent_j) + type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information + integer, dimension(:), allocatable, intent(inout) :: extent_i !< The number of points in the + !! i-direction in each i-row of the layout + integer, dimension(:), allocatable, intent(inout) :: extent_j !< The number of points in the + !! 
j-direction in each j-row of the layout + + if (allocated(extent_i)) deallocate(extent_i) + if (allocated(extent_j)) deallocate(extent_j) + allocate(extent_i(domain%layout(1))) ; extent_i(:) = 0 + allocate(extent_j(domain%layout(2))) ; extent_j(:) = 0 + call mpp_get_domain_extents(domain%mpp_domain, extent_i, extent_j) +end subroutine get_layout_extents + +end module MOM_domain_infra diff --git a/src/framework/MOM_domain_init.F90 b/src/framework/MOM_domain_init.F90 deleted file mode 100644 index 25064cf24e..0000000000 --- a/src/framework/MOM_domain_init.F90 +++ /dev/null @@ -1,330 +0,0 @@ -!> Describes the decomposed MOM domain and has routines for communications across PEs -module MOM_domain_init - -! This file is part of MOM6. See LICENSE.md for the license. - -use MOM_coms, only : num_PEs -use MOM_domains, only : MOM_domain_type, create_MOM_domain, MOM_define_layout -use MOM_domains, only : MOM_thread_affinity_set, set_MOM_thread_affinity -use MOM_error_handler, only : MOM_error, MOM_mesg, NOTE, WARNING, FATAL -use MOM_file_parser, only : get_param, log_param, log_version, param_file_type -use MOM_io, only : file_exists -use MOM_string_functions, only : slasher - -implicit none ; private - -public :: MOM_domains_init, MOM_domain_type - -contains - -!> MOM_domains_init initializes a MOM_domain_type variable, based on the information -!! read in from a param_file_type, and optionally returns data describing various' -!! properties of the domain type. -subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & - NIHALO, NJHALO, NIGLOBAL, NJGLOBAL, NIPROC, NJPROC, & - min_halo, domain_name, include_name, param_suffix) - type(MOM_domain_type), pointer :: MOM_dom !< A pointer to the MOM_domain_type - !! being defined here. - type(param_file_type), intent(in) :: param_file !< A structure to parse for - !! run-time parameters - logical, optional, intent(in) :: symmetric !< If present, this specifies - !! 
whether this domain is symmetric, regardless of - !! whether the macro SYMMETRIC_MEMORY_ is defined. - logical, optional, intent(in) :: static_memory !< If present and true, this - !! domain type is set up for static memory and - !! error checking of various input values is - !! performed against those in the input file. - integer, optional, intent(in) :: NIHALO !< Default halo sizes, required - !! with static memory. - integer, optional, intent(in) :: NJHALO !< Default halo sizes, required - !! with static memory. - integer, optional, intent(in) :: NIGLOBAL !< Total domain sizes, required - !! with static memory. - integer, optional, intent(in) :: NJGLOBAL !< Total domain sizes, required - !! with static memory. - integer, optional, intent(in) :: NIPROC !< Processor counts, required with - !! static memory. - integer, optional, intent(in) :: NJPROC !< Processor counts, required with - !! static memory. - integer, dimension(2), optional, intent(inout) :: min_halo !< If present, this sets the - !! minimum halo size for this domain in the i- and j- - !! directions, and returns the actual halo size used. - character(len=*), optional, intent(in) :: domain_name !< A name for this domain, "MOM" - !! if missing. - character(len=*), optional, intent(in) :: include_name !< A name for model's include file, - !! "MOM_memory.h" if missing. - character(len=*), optional, intent(in) :: param_suffix !< A suffix to apply to - !! layout-specific parameters. - - ! Local variables - integer, dimension(2) :: layout ! The number of logical processors in the i- and j- directions - integer, dimension(2) :: io_layout ! The layout of logical processors for input and output - !$ integer :: ocean_nthreads ! Number of openMP threads - !$ logical :: ocean_omp_hyper_thread ! If true use openMP hyper-threads - integer, dimension(2) :: n_global ! The number of i- and j- points in the global computational domain - integer, dimension(2) :: n_halo ! 
The number of i- and j- points in the halos - integer :: nihalo_dflt, njhalo_dflt ! The default halo sizes - integer :: PEs_used ! The number of processors used - logical, dimension(2) :: reentrant ! True if the x- and y- directions are periodic. - logical, dimension(2,2) :: tripolar ! A set of flag indicating whether there is tripolar - ! connectivity for any of the four logical edges of the grid. - ! Currently only tripolar_N is implemented. - logical :: is_static ! If true, static memory is being used for this domain. - logical :: is_symmetric ! True if the domain being set up will use symmetric memory. - logical :: nonblocking ! If true, nonblocking halo updates will be used. - logical :: thin_halos ! If true, If true, optional arguments may be used to specify the - ! width of the halos that are updated with each call. - logical :: mask_table_exists ! True if there is a mask table file - character(len=128) :: inputdir ! The directory in which to find the diag table - character(len=200) :: mask_table ! The file name and later the full path to the diag table - character(len=64) :: inc_nm ! The name of the memory include file - character(len=200) :: mesg ! A string to use for error messages - - integer :: nip_parsed, njp_parsed - character(len=8) :: char_xsiz, char_ysiz, char_niglobal, char_njglobal - character(len=40) :: nihalo_nm, njhalo_nm, layout_nm, io_layout_nm, masktable_nm - character(len=40) :: niproc_nm, njproc_nm - ! This include declares and sets the variable "version". -# include "version_variable.h" - character(len=40) :: mdl ! This module's name. - - PEs_used = num_PEs() - - mdl = "MOM_domains" !### Change this to "MOM_domain_init" - - is_symmetric = .true. 
; if (present(symmetric)) is_symmetric = symmetric - if (present(min_halo)) mdl = trim(mdl)//" min_halo" - - inc_nm = "MOM_memory.h" ; if (present(include_name)) inc_nm = trim(include_name) - - nihalo_nm = "NIHALO" ; njhalo_nm = "NJHALO" - layout_nm = "LAYOUT" ; io_layout_nm = "IO_LAYOUT" ; masktable_nm = "MASKTABLE" - niproc_nm = "NIPROC" ; njproc_nm = "NJPROC" - if (present(param_suffix)) then ; if (len(trim(adjustl(param_suffix))) > 0) then - nihalo_nm = "NIHALO"//(trim(adjustl(param_suffix))) - njhalo_nm = "NJHALO"//(trim(adjustl(param_suffix))) - layout_nm = "LAYOUT"//(trim(adjustl(param_suffix))) - io_layout_nm = "IO_LAYOUT"//(trim(adjustl(param_suffix))) - masktable_nm = "MASKTABLE"//(trim(adjustl(param_suffix))) - niproc_nm = "NIPROC"//(trim(adjustl(param_suffix))) - njproc_nm = "NJPROC"//(trim(adjustl(param_suffix))) - endif ; endif - - is_static = .false. ; if (present(static_memory)) is_static = static_memory - if (is_static) then - if (.not.present(NIHALO)) call MOM_error(FATAL, "NIHALO must be "// & - "present in the call to MOM_domains_init with static memory.") - if (.not.present(NJHALO)) call MOM_error(FATAL, "NJHALO must be "// & - "present in the call to MOM_domains_init with static memory.") - if (.not.present(NIGLOBAL)) call MOM_error(FATAL, "NIGLOBAL must be "// & - "present in the call to MOM_domains_init with static memory.") - if (.not.present(NJGLOBAL)) call MOM_error(FATAL, "NJGLOBAL must be "// & - "present in the call to MOM_domains_init with static memory.") - if (.not.present(NIPROC)) call MOM_error(FATAL, "NIPROC must be "// & - "present in the call to MOM_domains_init with static memory.") - if (.not.present(NJPROC)) call MOM_error(FATAL, "NJPROC must be "// & - "present in the call to MOM_domains_init with static memory.") - endif - - ! Read all relevant parameters and write them to the model log. - call log_version(param_file, mdl, version, "", log_to_all=.true., layout=.true.) 
- call get_param(param_file, mdl, "REENTRANT_X", reentrant(1), & - "If true, the domain is zonally reentrant.", default=.true.) - call get_param(param_file, mdl, "REENTRANT_Y", reentrant(2), & - "If true, the domain is meridionally reentrant.", & - default=.false.) - tripolar(1:2,1:2) = .false. - call get_param(param_file, mdl, "TRIPOLAR_N", tripolar(2,2), & - "Use tripolar connectivity at the northern edge of the "//& - "domain. With TRIPOLAR_N, NIGLOBAL must be even.", & - default=.false.) - -# ifndef NOT_SET_AFFINITY - !$ if (.not.MOM_thread_affinity_set()) then - !$ call get_param(param_file, mdl, "OCEAN_OMP_THREADS", ocean_nthreads, & - !$ "The number of OpenMP threads that MOM6 will use.", & - !$ default = 1, layoutParam=.true.) - !$ call get_param(param_file, mdl, "OCEAN_OMP_HYPER_THREAD", ocean_omp_hyper_thread, & - !$ "If True, use hyper-threading.", default = .false., layoutParam=.true.) - !$ call set_MOM_thread_affinity(ocean_nthreads, ocean_omp_hyper_thread) - !$ endif -# endif - - call log_param(param_file, mdl, "!SYMMETRIC_MEMORY_", is_symmetric, & - "If defined, the velocity point data domain includes every face of the "//& - "thickness points. In other words, some arrays are larger than others, "//& - "depending on where they are on the staggered grid. Also, the starting "//& - "index of the velocity-point arrays is usually 0, not 1. "//& - "This can only be set at compile time.",& - layoutParam=.true.) - call get_param(param_file, mdl, "NONBLOCKING_UPDATES", nonblocking, & - "If true, non-blocking halo updates may be used.", & - default=.false., layoutParam=.true.) - !### Note the duplicated "the the" in the following description, which should be fixed as a part - ! of a larger commit that also changes other MOM_parameter_doc file messages, but for now - ! reproduces the existing output files. 
- call get_param(param_file, mdl, "THIN_HALO_UPDATES", thin_halos, & - "If true, optional arguments may be used to specify the the width of the "//& - "halos that are updated with each call.", & - default=.true., layoutParam=.true.) - - nihalo_dflt = 4 ; njhalo_dflt = 4 - if (present(NIHALO)) nihalo_dflt = NIHALO - if (present(NJHALO)) njhalo_dflt = NJHALO - - call log_param(param_file, mdl, "!STATIC_MEMORY_", is_static, & - "If STATIC_MEMORY_ is defined, the principle variables will have sizes that "//& - "are statically determined at compile time. Otherwise the sizes are not "//& - "determined until run time. The STATIC option is substantially faster, but "//& - "does not allow the PE count to be changed at run time. This can only be "//& - "set at compile time.", layoutParam=.true.) - - if (is_static) then - call get_param(param_file, mdl, "NIGLOBAL", n_global(1), & - "The total number of thickness grid points in the x-direction in the physical "//& - "domain. With STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.", & - static_value=NIGLOBAL) - call get_param(param_file, mdl, "NJGLOBAL", n_global(2), & - "The total number of thickness grid points in the y-direction in the physical "//& - "domain. With STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.", & - static_value=NJGLOBAL) - if (n_global(1) /= NIGLOBAL) call MOM_error(FATAL,"MOM_domains_init: " // & - "static mismatch for NIGLOBAL_ domain size. Header file does not match input namelist") - if (n_global(2) /= NJGLOBAL) call MOM_error(FATAL,"MOM_domains_init: " // & - "static mismatch for NJGLOBAL_ domain size. Header file does not match input namelist") - - ! Check the requirement of equal sized compute domains when STATIC_MEMORY_ is used. - if ((MOD(NIGLOBAL, NIPROC) /= 0) .OR. 
(MOD(NJGLOBAL, NJPROC) /= 0)) then - write( char_xsiz, '(i4)' ) NIPROC - write( char_ysiz, '(i4)' ) NJPROC - write( char_niglobal, '(i4)' ) NIGLOBAL - write( char_njglobal, '(i4)' ) NJGLOBAL - call MOM_error(WARNING, 'MOM_domains: Processor decomposition (NIPROC_,NJPROC_) = ('//& - trim(char_xsiz)//','//trim(char_ysiz)//') does not evenly divide size '//& - 'set by preprocessor macro ('//trim(char_niglobal)//','//trim(char_njglobal)//').') - call MOM_error(FATAL,'MOM_domains: #undef STATIC_MEMORY_ in '//trim(inc_nm)//' to use '//& - 'dynamic allocation, or change processor decomposition to evenly divide the domain.') - endif - else - call get_param(param_file, mdl, "NIGLOBAL", n_global(1), & - "The total number of thickness grid points in the x-direction in the physical "//& - "domain. With STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.", & - fail_if_missing=.true.) - call get_param(param_file, mdl, "NJGLOBAL", n_global(2), & - "The total number of thickness grid points in the y-direction in the physical "//& - "domain. With STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.", & - fail_if_missing=.true.) - endif - - call get_param(param_file, mdl, trim(nihalo_nm), n_halo(1), & - "The number of halo points on each side in the x-direction. How this is set "//& - "varies with the calling component and static or dynamic memory configuration.", & - default=nihalo_dflt, static_value=nihalo_dflt) - call get_param(param_file, mdl, trim(njhalo_nm), n_halo(2), & - "The number of halo points on each side in the y-direction. How this is set "//& - "varies with the calling component and static or dynamic memory configuration.", & - default=njhalo_dflt, static_value=njhalo_dflt) - if (present(min_halo)) then - n_halo(1) = max(n_halo(1), min_halo(1)) - min_halo(1) = n_halo(1) - n_halo(2) = max(n_halo(2), min_halo(2)) - min_halo(2) = n_halo(2) - ! These are generally used only with static memory, so they are considerd layout params. 
- call log_param(param_file, mdl, "!NIHALO min_halo", n_halo(1), layoutParam=.true.) - call log_param(param_file, mdl, "!NJHALO min_halo", n_halo(2), layoutParam=.true.) - endif - if (is_static .and. .not.present(min_halo)) then - if (n_halo(1) /= NIHALO) call MOM_error(FATAL,"MOM_domains_init: " // & - "static mismatch for "//trim(nihalo_nm)//" domain size") - if (n_halo(2) /= NJHALO) call MOM_error(FATAL,"MOM_domains_init: " // & - "static mismatch for "//trim(njhalo_nm)//" domain size") - endif - - call get_param(param_file, mdl, "INPUTDIR", inputdir, do_not_log=.true., default=".") - inputdir = slasher(inputdir) - - call get_param(param_file, mdl, trim(masktable_nm), mask_table, & - "A text file to specify n_mask, layout and mask_list. This feature masks out "//& - "processors that contain only land points. The first line of mask_table is the "//& - "number of regions to be masked out. The second line is the layout of the "//& - "model and must be consistent with the actual model layout. The following "//& - "(n_mask) lines give the logical positions of the processors that are masked "//& - "out. The mask_table can be created by tools like check_mask. The following "//& - "example of mask_table masks out 2 processors, (1,2) and (3,6), out of the 24 "//& - "in a 4x6 layout: \n 2\n 4,6\n 1,2\n 3,6\n", default="MOM_mask_table", & - layoutParam=.true.) - mask_table = trim(inputdir)//trim(mask_table) - mask_table_exists = file_exists(mask_table) - - if (is_static) then - layout(1) = NIPROC ; layout(2) = NJPROC - else - call get_param(param_file, mdl, trim(layout_nm), layout, & - "The processor layout to be used, or 0, 0 to automatically set the layout "//& - "based on the number of processors.", default=0, do_not_log=.true.) - call get_param(param_file, mdl, trim(niproc_nm), nip_parsed, & - "The number of processors in the x-direction.", default=-1, do_not_log=.true.) 
- call get_param(param_file, mdl, trim(njproc_nm), njp_parsed, & - "The number of processors in the y-direction.", default=-1, do_not_log=.true.) - if (nip_parsed > -1) then - if ((layout(1) > 0) .and. (layout(1) /= nip_parsed)) & - call MOM_error(FATAL, trim(layout_nm)//" and "//trim(niproc_nm)//" set inconsistently. "//& - "Only LAYOUT should be used.") - layout(1) = nip_parsed - call MOM_mesg(trim(niproc_nm)//" used to set "//trim(layout_nm)//" in dynamic mode. "//& - "Shift to using "//trim(layout_nm)//" instead.") - endif - if (njp_parsed > -1) then - if ((layout(2) > 0) .and. (layout(2) /= njp_parsed)) & - call MOM_error(FATAL, trim(layout_nm)//" and "//trim(njproc_nm)//" set inconsistently. "//& - "Only "//trim(layout_nm)//" should be used.") - layout(2) = njp_parsed - call MOM_mesg(trim(njproc_nm)//" used to set "//trim(layout_nm)//" in dynamic mode. "//& - "Shift to using "//trim(layout_nm)//" instead.") - endif - - if ( (layout(1) == 0) .and. (layout(2) == 0) ) & - call MOM_define_layout( (/ 1, n_global(1), 1, n_global(2) /), PEs_used, layout) - if ( (layout(1) /= 0) .and. (layout(2) == 0) ) layout(2) = PEs_used / layout(1) - if ( (layout(1) == 0) .and. (layout(2) /= 0) ) layout(1) = PEs_used / layout(2) - - if (layout(1)*layout(2) /= PEs_used .and. (.not. mask_table_exists) ) then - write(mesg,'("MOM_domains_init: The product of the two components of layout, ", & - & 2i4,", is not the number of PEs used, ",i5,".")') & - layout(1), layout(2), PEs_used - call MOM_error(FATAL, mesg) - endif - endif - call log_param(param_file, mdl, trim(niproc_nm), layout(1), & - "The number of processors in the x-direction. With STATIC_MEMORY_ this "//& - "is set in "//trim(inc_nm)//" at compile time.", layoutParam=.true.) - call log_param(param_file, mdl, trim(njproc_nm), layout(2), & - "The number of processors in the y-direction. With STATIC_MEMORY_ this "//& - "is set in "//trim(inc_nm)//" at compile time.", layoutParam=.true.) 
- call log_param(param_file, mdl, trim(layout_nm), layout, & - "The processor layout that was actually used.", layoutParam=.true.) - - ! Idiot check that fewer PEs than columns have been requested - if (layout(1)*layout(2) > n_global(1)*n_global(2)) then - write(mesg,'(a,2(i5,x,a))') 'You requested to use',layout(1)*layout(2), & - 'PEs but there are only', n_global(1)*n_global(2), 'columns in the model' - call MOM_error(FATAL, mesg) - endif - - if (mask_table_exists) & - call MOM_error(NOTE, 'MOM_domains_init: reading maskmap information from '//trim(mask_table)) - - ! Set up the I/O layout, it will be checked later that it uses an even multiple of the number of - ! PEs in each direction. - io_layout(:) = (/ 1, 1 /) - call get_param(param_file, mdl, trim(io_layout_nm), io_layout, & - "The processor layout to be used, or 0,0 to automatically set the io_layout "//& - "to be the same as the layout.", default=1, layoutParam=.true.) - - call create_MOM_domain(MOM_dom, n_global, n_halo, reentrant, tripolar, layout, & - io_layout=io_layout, domain_name=domain_name, mask_table=mask_table, & - symmetric=symmetric, thin_halos=thin_halos, nonblocking=nonblocking) - -end subroutine MOM_domains_init - -end module MOM_domain_init diff --git a/src/framework/MOM_domains.F90 b/src/framework/MOM_domains.F90 index 56ac0b3ccf..9ccef2888e 100644 --- a/src/framework/MOM_domains.F90 +++ b/src/framework/MOM_domains.F90 @@ -3,1181 +3,54 @@ module MOM_domains ! This file is part of MOM6. See LICENSE.md for the license. 
-use MOM_array_transform, only : rotate_array -use MOM_coms, only : PE_here, root_PE, num_PEs, MOM_infra_init, MOM_infra_end -use MOM_coms, only : broadcast, sum_across_PEs, min_across_PEs, max_across_PEs -use MOM_cpu_clock, only : cpu_clock_begin, cpu_clock_end -use MOM_error_handler, only : MOM_error, MOM_mesg, NOTE, WARNING, FATAL, is_root_pe -use MOM_file_parser, only : get_param, log_param, log_version -use MOM_file_parser, only : param_file_type +use MOM_coms_infra, only : MOM_infra_init, MOM_infra_end +use MOM_coms_infra, only : PE_here, root_PE, num_PEs, broadcast +use MOM_coms_infra, only : sum_across_PEs, min_across_PEs, max_across_PEs +use MOM_domain_infra, only : MOM_domain_type, domain2D, domain1D +use MOM_domain_infra, only : create_MOM_domain, clone_MOM_domain, deallocate_MOM_domain +use MOM_domain_infra, only : MOM_define_domain, MOM_define_layout +use MOM_domain_infra, only : get_domain_extent, get_domain_components +use MOM_domain_infra, only : compute_block_extent, get_global_shape +use MOM_domain_infra, only : pass_var, pass_vector, fill_symmetric_edges, global_field_sum +use MOM_domain_infra, only : pass_var_start, pass_var_complete +use MOM_domain_infra, only : pass_vector_start, pass_vector_complete +use MOM_domain_infra, only : create_group_pass, do_group_pass, group_pass_type +use MOM_domain_infra, only : start_group_pass, complete_group_pass +use MOM_domain_infra, only : global_field, redistribute_array, broadcast_domain +use MOM_domain_infra, only : AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR, BITWISE_EXACT_SUM +use MOM_domain_infra, only : CORNER, CENTER, NORTH_FACE, EAST_FACE +use MOM_domain_infra, only : To_East, To_West, To_North, To_South, To_All, Omit_Corners +use MOM_domain_infra, only : MOM_thread_affinity_set, set_MOM_thread_affinity +use MOM_error_handler, only : MOM_error, MOM_mesg, NOTE, WARNING, FATAL +use MOM_file_parser, only : get_param, log_param, log_version, param_file_type +use MOM_io_infra, only : file_exists use 
MOM_string_functions, only : slasher -use mpp_domains_mod, only : MOM_define_layout => mpp_define_layout, mpp_get_boundary -use mpp_domains_mod, only : MOM_define_io_domain => mpp_define_io_domain -use mpp_domains_mod, only : MOM_define_domain => mpp_define_domains -use mpp_domains_mod, only : domain2D, domain1D, mpp_get_data_domain, mpp_get_domain_components -use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_global_domain -use mpp_domains_mod, only : mpp_get_domain_extents, mpp_deallocate_domain -use mpp_domains_mod, only : global_field_sum => mpp_global_sum -use mpp_domains_mod, only : mpp_update_domains, CYCLIC_GLOBAL_DOMAIN, FOLD_NORTH_EDGE -use mpp_domains_mod, only : mpp_start_update_domains, mpp_complete_update_domains -use mpp_domains_mod, only : mpp_create_group_update, mpp_do_group_update -use mpp_domains_mod, only : group_pass_type => mpp_group_update_type -use mpp_domains_mod, only : mpp_reset_group_update_field -use mpp_domains_mod, only : mpp_group_update_initialized -use mpp_domains_mod, only : mpp_start_group_update, mpp_complete_group_update -use mpp_domains_mod, only : compute_block_extent => mpp_compute_block_extent -use mpp_domains_mod, only : AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR, BITWISE_EXACT_SUM -use mpp_domains_mod, only : To_East => WUPDATE, To_West => EUPDATE, Omit_Corners => EDGEUPDATE -use mpp_domains_mod, only : To_North => SUPDATE, To_South => NUPDATE -use mpp_domains_mod, only : CENTER, CORNER, NORTH_FACE => NORTH, EAST_FACE => EAST -use fms_io_mod, only : file_exist, parse_mask_table -use fms_affinity_mod, only : fms_affinity_init, fms_affinity_set, fms_affinity_get - implicit none ; private -public :: MOM_domains_init, MOM_infra_init, MOM_infra_end, get_domain_extent, get_domain_extent_dsamp2 -public :: create_MOM_domain, clone_MOM_domain, get_domain_components -public :: deallocate_MOM_domain, deallocate_domain_contents -public :: MOM_define_domain, MOM_define_layout, MOM_define_io_domain -public :: pass_var, 
pass_vector, PE_here, root_PE, num_PEs -public :: pass_var_start, pass_var_complete, fill_symmetric_edges, broadcast -public :: pass_vector_start, pass_vector_complete +public :: MOM_infra_init, MOM_infra_end +! Domain types and creation and destruction routines +public :: MOM_domain_type, domain2D, domain1D +public :: MOM_domains_init, create_MOM_domain, clone_MOM_domain, deallocate_MOM_domain +! Domain query routines +public :: get_domain_extent, get_domain_components, compute_block_extent, get_global_shape +public :: PE_here, root_PE, num_PEs +! Single call communication routines +public :: pass_var, pass_vector, fill_symmetric_edges, broadcast +! Non-blocking communication routines +public :: pass_var_start, pass_var_complete, pass_vector_start, pass_vector_complete +! Multi-variable group communication routines and type +public :: create_group_pass, do_group_pass, group_pass_type, start_group_pass, complete_group_pass +! Global reduction routines public :: global_field_sum, sum_across_PEs, min_across_PEs, max_across_PEs +public :: global_field, redistribute_array, broadcast_domain +! 
Coded integers for controlling communication or staggering public :: AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR, BITWISE_EXACT_SUM public :: CORNER, CENTER, NORTH_FACE, EAST_FACE public :: To_East, To_West, To_North, To_South, To_All, Omit_Corners -public :: create_group_pass, do_group_pass, group_pass_type -public :: start_group_pass, complete_group_pass -public :: compute_block_extent, get_global_shape, get_layout_extents -public :: MOM_thread_affinity_set, set_MOM_thread_affinity -public :: get_simple_array_i_ind, get_simple_array_j_ind -public :: domain2D, domain1D - -!> Do a halo update on an array -interface pass_var - module procedure pass_var_3d, pass_var_2d -end interface pass_var - -!> Do a halo update on a pair of arrays representing the two components of a vector -interface pass_vector - module procedure pass_vector_3d, pass_vector_2d -end interface pass_vector - -!> Initiate a non-blocking halo update on an array -interface pass_var_start - module procedure pass_var_start_3d, pass_var_start_2d -end interface pass_var_start - -!> Complete a non-blocking halo update on an array -interface pass_var_complete - module procedure pass_var_complete_3d, pass_var_complete_2d -end interface pass_var_complete - -!> Initiate a halo update on a pair of arrays representing the two components of a vector -interface pass_vector_start - module procedure pass_vector_start_3d, pass_vector_start_2d -end interface pass_vector_start - -!> Complete a halo update on a pair of arrays representing the two components of a vector -interface pass_vector_complete - module procedure pass_vector_complete_3d, pass_vector_complete_2d -end interface pass_vector_complete - -!> Set up a group of halo updates -interface create_group_pass - module procedure create_var_group_pass_2d - module procedure create_var_group_pass_3d - module procedure create_vector_group_pass_2d - module procedure create_vector_group_pass_3d -end interface create_group_pass - -!> Do a set of halo updates that fill in 
the values at the duplicated edges -!! of a staggered symmetric memory domain -interface fill_symmetric_edges - module procedure fill_vector_symmetric_edges_2d !, fill_vector_symmetric_edges_3d -! module procedure fill_scalar_symmetric_edges_2d, fill_scalar_symmetric_edges_3d -end interface fill_symmetric_edges - -!> Copy one MOM_domain_type into another -interface clone_MOM_domain - module procedure clone_MD_to_MD, clone_MD_to_d2D -end interface clone_MOM_domain - -!> Extract the 1-d domain components from a MOM_domain or domain2d -interface get_domain_components - module procedure get_domain_components_MD, get_domain_components_d2D -end interface get_domain_components - -!> The MOM_domain_type contains information about the domain decomposition. -type, public :: MOM_domain_type - type(domain2D), pointer :: mpp_domain => NULL() !< The FMS domain with halos - !! on this processor, centered at h points. - type(domain2D), pointer :: mpp_domain_d2 => NULL() !< A coarse FMS domain with halos - !! on this processor, centered at h points. - integer :: niglobal !< The total horizontal i-domain size. - integer :: njglobal !< The total horizontal j-domain size. - integer :: nihalo !< The i-halo size in memory. - integer :: njhalo !< The j-halo size in memory. - logical :: symmetric !< True if symmetric memory is used with - !! this domain. - logical :: nonblocking_updates !< If true, non-blocking halo updates are - !! allowed. The default is .false. (for now). - logical :: thin_halo_updates !< If true, optional arguments may be used to - !! specify the width of the halos that are - !! updated with each call. - integer :: layout(2) !< This domain's processor layout. This is - !! saved to enable the construction of related - !! new domains with different resolutions or - !! other properties. - integer :: io_layout(2) !< The IO-layout used with this domain. - integer :: X_FLAGS !< Flag that specifies the properties of the - !! domain in the i-direction in a define_domain call. 
- integer :: Y_FLAGS !< Flag that specifies the properties of the - !! domain in the j-direction in a define_domain call. - logical, pointer :: maskmap(:,:) => NULL() !< A pointer to an array indicating - !! which logical processors are actually used for - !! the ocean code. The other logical processors - !! would be contain only land points and are not - !! assigned to actual processors. This need not be - !! assigned if all logical processors are used. -end type MOM_domain_type - -integer, parameter :: To_All = To_East + To_West + To_North + To_South !< A flag for passing in all directions contains -!> pass_var_3d does a halo update for a three-dimensional array. -subroutine pass_var_3d(array, MOM_dom, sideflag, complete, position, halo, & - clock) - real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: sideflag !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, - !! sothe halos on the western side are filled. TO_ALL is the default if sideflag is omitted. - logical, optional, intent(in) :: complete !< An optional argument indicating whether the - !! halo updates should be completed before - !! progress resumes. Omitting complete is the - !! same as setting complete to .true. - integer, optional, intent(in) :: position !< An optional argument indicating the position. - !! This is CENTER by default and is often CORNER, - !! but could also be EAST_FACE or NORTH_FACE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. 
- integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - - integer :: dirflag - logical :: block_til_complete - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - dirflag = To_All ! 60 - if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif - block_til_complete = .true. - if (present(complete)) block_til_complete = complete - - if (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & - complete=block_til_complete, position=position, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & - complete=block_til_complete, position=position) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine pass_var_3d - -!> pass_var_2d does a halo update for a two-dimensional array. -subroutine pass_var_2d(array, MOM_dom, sideflag, complete, position, halo, inner_halo, clock) - real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be sent. - integer, optional, intent(in) :: sideflag !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, - !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. - logical, optional, intent(in) :: complete !< An optional argument indicating whether the - !! halo updates should be completed before - !! progress resumes. Omitting complete is the - !! same as setting complete to .true. 
- integer, optional, intent(in) :: position !< An optional argument indicating the position. - !! This is CENTER by default and is often CORNER, - !! but could also be EAST_FACE or NORTH_FACE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full halo - !! by default. - integer, optional, intent(in) :: inner_halo !< The size of an inner halo to avoid updating, - !! or 0 to avoid updating symmetric memory - !! computational domain points. Setting this >=0 - !! also enforces that complete=.true. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - - ! Local variables - real, allocatable, dimension(:,:) :: tmp - integer :: pos, i_halo, j_halo - integer :: isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB - integer :: inner, i, j, isfw, iefw, isfe, iefe, jsfs, jefs, jsfn, jefn - integer :: dirflag - logical :: block_til_complete - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - dirflag = To_All ! 60 - if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif - block_til_complete = .true. ; if (present(complete)) block_til_complete = complete - pos = CENTER ; if (present(position)) pos = position - - if (present(inner_halo)) then ; if (inner_halo >= 0) then - ! Store the original values. - allocate(tmp(size(array,1), size(array,2))) - tmp(:,:) = array(:,:) - block_til_complete = .true. - endif ; endif - - if (present(halo) .and. 
MOM_dom%thin_halo_updates) then - call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & - complete=block_til_complete, position=position, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, & - complete=block_til_complete, position=position) - endif - - if (present(inner_halo)) then ; if (inner_halo >= 0) then - call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec) - call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed) - ! Convert to local indices for arrays starting at 1. - isc = isc - (isd-1) ; iec = iec - (isd-1) ; ied = ied - (isd-1) ; isd = 1 - jsc = jsc - (jsd-1) ; jec = jec - (jsd-1) ; jed = jed - (jsd-1) ; jsd = 1 - i_halo = min(inner_halo, isc-1) ; j_halo = min(inner_halo, jsc-1) - - ! Figure out the array index extents of the eastern, western, northern and southern regions to copy. - if (pos == CENTER) then - if (size(array,1) == ied) then - isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo - else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CENTER array.") ; endif - if (size(array,2) == jed) then - isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo - else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CENTER array.") ; endif - elseif (pos == CORNER) then - if (size(array,1) == ied) then - isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo - elseif (size(array,1) == ied+1) then - isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1) - else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CORNER array.") ; endif - if (size(array,2) == jed) then - jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo - elseif (size(array,2) == jed+1) then - jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1) - else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CORNER 
array.") ; endif - elseif (pos == NORTH_FACE) then - if (size(array,1) == ied) then - isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo - else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for NORTH_FACE array.") ; endif - if (size(array,2) == jed) then - jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo - elseif (size(array,2) == jed+1) then - jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1) - else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for NORTH_FACE array.") ; endif - elseif (pos == EAST_FACE) then - if (size(array,1) == ied) then - isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo - elseif (size(array,1) == ied+1) then - isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1) - else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for EAST_FACE array.") ; endif - if (size(array,2) == jed) then - isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo - else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for EAST_FACE array.") ; endif - else - call MOM_error(FATAL, "pass_var_2d: Unrecognized position") - endif - - ! Copy back the stored inner halo points - do j=jsfs,jefn ; do i=isfw,iefw ; array(i,j) = tmp(i,j) ; enddo ; enddo - do j=jsfs,jefn ; do i=isfe,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo - do j=jsfs,jefs ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo - do j=jsfn,jefn ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo - - deallocate(tmp) - endif ; endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine pass_var_2d - -!> pass_var_start_2d starts a halo update for a two-dimensional array. -function pass_var_start_2d(array, MOM_dom, sideflag, position, complete, halo, & - clock) - real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points - !! exchanged. 
- type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: sideflag !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, - !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. - integer, optional, intent(in) :: position !< An optional argument indicating the position. - !! This is CENTER by default and is often CORNER, - !! but could also be EAST_FACE or NORTH_FACE. - logical, optional, intent(in) :: complete !< An optional argument indicating whether the - !! halo updates should be completed before - !! progress resumes. Omitting complete is the - !! same as setting complete to .true. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - integer :: pass_var_start_2d !0) call cpu_clock_begin(clock) ; endif - - dirflag = To_All ! 60 - if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif - - if (present(halo) .and. MOM_dom%thin_halo_updates) then - pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & - flags=dirflag, position=position, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & - flags=dirflag, position=position) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end function pass_var_start_2d - -!> pass_var_start_3d starts a halo update for a three-dimensional array. 
-function pass_var_start_3d(array, MOM_dom, sideflag, position, complete, halo, & - clock) - real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: sideflag !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, - !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. - integer, optional, intent(in) :: position !< An optional argument indicating the position. - !! This is CENTER by default and is often CORNER, - !! but could also be EAST_FACE or NORTH_FACE. - logical, optional, intent(in) :: complete !< An optional argument indicating whether the - !! halo updates should be completed before - !! progress resumes. Omitting complete is the - !! same as setting complete to .true. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - integer :: pass_var_start_3d !< The integer index for this update. - - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - dirflag = To_All ! 60 - if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif - - if (present(halo) .and. 
MOM_dom%thin_halo_updates) then - pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & - flags=dirflag, position=position, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & - flags=dirflag, position=position) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end function pass_var_start_3d - -!> pass_var_complete_2d completes a halo update for a two-dimensional array. -subroutine pass_var_complete_2d(id_update, array, MOM_dom, sideflag, position, halo, & - clock) - integer, intent(in) :: id_update !< The integer id of this update which has - !! been returned from a previous call to - !! pass_var_start. - real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: sideflag !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, - !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. - integer, optional, intent(in) :: position !< An optional argument indicating the position. - !! This is CENTER by default and is often CORNER, - !! but could also be EAST_FACE or NORTH_FACE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - dirflag = To_All ! 
60 - if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif - - if (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & - flags=dirflag, position=position, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & - flags=dirflag, position=position) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine pass_var_complete_2d - -!> pass_var_complete_3d completes a halo update for a three-dimensional array. -subroutine pass_var_complete_3d(id_update, array, MOM_dom, sideflag, position, halo, & - clock) - integer, intent(in) :: id_update !< The integer id of this update which has - !! been returned from a previous call to - !! pass_var_start. - real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: sideflag !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, - !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. - integer, optional, intent(in) :: position !< An optional argument indicating the position. - !! This is CENTER by default and is often CORNER, - !! but could also be EAST_FACE or NORTH_FACE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. 
- - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - dirflag = To_All ! 60 - if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif - - if (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & - flags=dirflag, position=position, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & - flags=dirflag, position=position) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine pass_var_complete_3d - -!> pass_vector_2d does a halo update for a pair of two-dimensional arrays -!! representing the compontents of a two-dimensional horizontal vector. -subroutine pass_vector_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & - clock) - real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! vector pair which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: direction !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional - !! scalars discretized at the typical vector component locations. For example, TO_EAST sends - !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL - !! is the default if omitted. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! 
BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. - logical, optional, intent(in) :: complete !< An optional argument indicating whether the - !! halo updates should be completed before progress resumes. - !! Omitting complete is the same as setting complete to .true. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - - ! Local variables - integer :: stagger_local - integer :: dirflag - logical :: block_til_complete - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! Default value for type of grid - if (present(stagger)) stagger_local = stagger - - dirflag = To_All ! 60 - if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif - block_til_complete = .true. - if (present(complete)) block_til_complete = complete - - if (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & - gridtype=stagger_local, complete = block_til_complete, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & - gridtype=stagger_local, complete = block_til_complete) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine pass_vector_2d - -!> fill_vector_symmetric_edges_2d does an usual set of halo updates that only -!! fill in the values at the edge of a pair of symmetric memory two-dimensional -!! arrays representing the compontents of a two-dimensional horizontal vector. -!! If symmetric memory is not being used, this subroutine does nothing except to -!! possibly turn optional cpu clocks on or off. 
-subroutine fill_vector_symmetric_edges_2d(u_cmpt, v_cmpt, MOM_dom, stagger, scalar, & - clock) - real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! vector pair which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. - logical, optional, intent(in) :: scalar !< An optional argument indicating whether. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - - ! Local variables - integer :: stagger_local - integer :: dirflag - integer :: i, j, isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB - real, allocatable, dimension(:) :: sbuff_x, sbuff_y, wbuff_x, wbuff_y - logical :: block_til_complete - - if (.not. MOM_dom%symmetric) then - return - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! Default value for type of grid - if (present(stagger)) stagger_local = stagger - - if (.not.(stagger_local == CGRID_NE .or. stagger_local == BGRID_NE)) return - - call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec) - call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed) - - ! Adjust isc, etc., to account for the fact that the input arrays indices all - ! start at 1 (and are effectively on a SW grid!). 
- isc = isc - (isd-1) ; iec = iec - (isd-1) - jsc = jsc - (jsd-1) ; jec = jec - (jsd-1) - IscB = isc ; IecB = iec+1 ; JscB = jsc ; JecB = jec+1 - - dirflag = To_All ! 60 - if (present(scalar)) then ; if (scalar) dirflag = To_All+SCALAR_PAIR ; endif - - if (stagger_local == CGRID_NE) then - allocate(wbuff_x(jsc:jec)) ; allocate(sbuff_y(isc:iec)) - wbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 - call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & - wbufferx=wbuff_x, sbuffery=sbuff_y, & - gridtype=CGRID_NE) - do i=isc,iec - v_cmpt(i,JscB) = sbuff_y(i) - enddo - do j=jsc,jec - u_cmpt(IscB,j) = wbuff_x(j) - enddo - deallocate(wbuff_x) ; deallocate(sbuff_y) - elseif (stagger_local == BGRID_NE) then - allocate(wbuff_x(JscB:JecB)) ; allocate(sbuff_x(IscB:IecB)) - allocate(wbuff_y(JscB:JecB)) ; allocate(sbuff_y(IscB:IecB)) - wbuff_x(:) = 0.0 ; wbuff_y(:) = 0.0 ; sbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 - call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & - wbufferx=wbuff_x, sbufferx=sbuff_x, & - wbuffery=wbuff_y, sbuffery=sbuff_y, & - gridtype=BGRID_NE) - do I=IscB,IecB - u_cmpt(I,JscB) = sbuff_x(I) ; v_cmpt(I,JscB) = sbuff_y(I) - enddo - do J=JscB,JecB - u_cmpt(IscB,J) = wbuff_x(J) ; v_cmpt(IscB,J) = wbuff_y(J) - enddo - deallocate(wbuff_x) ; deallocate(sbuff_x) - deallocate(wbuff_y) ; deallocate(sbuff_y) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine fill_vector_symmetric_edges_2d - -!> pass_vector_3d does a halo update for a pair of three-dimensional arrays -!! representing the compontents of a three-dimensional horizontal vector. -subroutine pass_vector_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & - clock) - real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! 
vector pair which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: direction !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional - !! scalars discretized at the typical vector component locations. For example, TO_EAST sends - !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL - !! is the default if omitted. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. - logical, optional, intent(in) :: complete !< An optional argument indicating whether the - !! halo updates should be completed before progress resumes. - !! Omitting complete is the same as setting complete to .true. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - - ! Local variables - integer :: stagger_local - integer :: dirflag - logical :: block_til_complete - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! Default value for type of grid - if (present(stagger)) stagger_local = stagger - - dirflag = To_All ! 60 - if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif - block_til_complete = .true. - if (present(complete)) block_til_complete = complete - - if (present(halo) .and. 
MOM_dom%thin_halo_updates) then - call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & - gridtype=stagger_local, complete = block_til_complete, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & - gridtype=stagger_local, complete = block_til_complete) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine pass_vector_3d - -!> pass_vector_start_2d starts a halo update for a pair of two-dimensional arrays -!! representing the compontents of a two-dimensional horizontal vector. -function pass_vector_start_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & - clock) - real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! vector pair which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: direction !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional - !! scalars discretized at the typical vector component locations. For example, TO_EAST sends - !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL - !! is the default if omitted. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. 
- logical, optional, intent(in) :: complete !< An optional argument indicating whether the - !! halo updates should be completed before progress resumes. - !! Omitting complete is the same as setting complete to .true. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - integer :: pass_vector_start_2d !< The integer index for this - !! update. - - ! Local variables - integer :: stagger_local - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! Default value for type of grid - if (present(stagger)) stagger_local = stagger - - dirflag = To_All ! 60 - if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif - - if (present(halo) .and. MOM_dom%thin_halo_updates) then - pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & - MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & - MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end function pass_vector_start_2d - -!> pass_vector_start_3d starts a halo update for a pair of three-dimensional arrays -!! representing the compontents of a three-dimensional horizontal vector. -function pass_vector_start_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & - clock) - real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! vector pair which is having its halos points - !! exchanged. 
- type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: direction !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional - !! scalars discretized at the typical vector component locations. For example, TO_EAST sends - !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL - !! is the default if omitted. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. - logical, optional, intent(in) :: complete !< An optional argument indicating whether the - !! halo updates should be completed before progress resumes. - !! Omitting complete is the same as setting complete to .true. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - integer :: pass_vector_start_3d !< The integer index for this - !! update. - ! Local variables - integer :: stagger_local - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! Default value for type of grid - if (present(stagger)) stagger_local = stagger - - dirflag = To_All ! 60 - if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif - - if (present(halo) .and. 
MOM_dom%thin_halo_updates) then - pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & - MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & - MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end function pass_vector_start_3d - -!> pass_vector_complete_2d completes a halo update for a pair of two-dimensional arrays -!! representing the compontents of a two-dimensional horizontal vector. -subroutine pass_vector_complete_2d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & - clock) - integer, intent(in) :: id_update !< The integer id of this update which has been - !! returned from a previous call to - !! pass_var_start. - real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! vector pair which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: direction !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional - !! scalars discretized at the typical vector component locations. For example, TO_EAST sends - !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL - !! is the default if omitted. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! 
BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - ! Local variables - integer :: stagger_local - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! Default value for type of grid - if (present(stagger)) stagger_local = stagger - - dirflag = To_All ! 60 - if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif - - if (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & - MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & - MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine pass_vector_complete_2d - -!> pass_vector_complete_3d completes a halo update for a pair of three-dimensional -!! arrays representing the compontents of a three-dimensional horizontal vector. -subroutine pass_vector_complete_3d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & - clock) - integer, intent(in) :: id_update !< The integer id of this update which has been - !! returned from a previous call to - !! pass_var_start. - real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! vector pair which is having its halos points - !! exchanged. 
- type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: direction !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional - !! scalars discretized at the typical vector component locations. For example, TO_EAST sends - !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL - !! is the default if omitted. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - ! Local variables - integer :: stagger_local - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! Default value for type of grid - if (present(stagger)) stagger_local = stagger - - dirflag = To_All ! 60 - if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif - - if (present(halo) .and. 
MOM_dom%thin_halo_updates) then - call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & - MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & - whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) - else - call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & - MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine pass_vector_complete_3d - -!> create_var_group_pass_2d sets up a group of two-dimensional array halo updates. -subroutine create_var_group_pass_2d(group, array, MOM_dom, sideflag, position, & - halo, clock) - type(group_pass_type), intent(inout) :: group !< The data type that store information for - !! group update. This data will be used in - !! do_group_pass. - real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: sideflag !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, - !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. - integer, optional, intent(in) :: position !< An optional argument indicating the position. - !! This is CENTER by default and is often CORNER, - !! but could also be EAST_FACE or NORTH_FACE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - ! 
Local variables - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - dirflag = To_All ! 60 - if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif - - if (mpp_group_update_initialized(group)) then - call mpp_reset_group_update_field(group,array) - elseif (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & - position=position, whalo=halo, ehalo=halo, & - shalo=halo, nhalo=halo) - else - call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & - position=position) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine create_var_group_pass_2d - -!> create_var_group_pass_3d sets up a group of three-dimensional array halo updates. -subroutine create_var_group_pass_3d(group, array, MOM_dom, sideflag, position, halo, & - clock) - type(group_pass_type), intent(inout) :: group !< The data type that store information for - !! group update. This data will be used in - !! do_group_pass. - real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points - !! exchanged. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: sideflag !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, - !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. - integer, optional, intent(in) :: position !< An optional argument indicating the position. - !! This is CENTER by default and is often CORNER, - !! but could also be EAST_FACE or NORTH_FACE. 
- integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - ! Local variables - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - dirflag = To_All ! 60 - if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif - - if (mpp_group_update_initialized(group)) then - call mpp_reset_group_update_field(group,array) - elseif (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & - position=position, whalo=halo, ehalo=halo, & - shalo=halo, nhalo=halo) - else - call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, & - position=position) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine create_var_group_pass_3d - -!> create_vector_group_pass_2d sets up a group of two-dimensional vector halo updates. -subroutine create_vector_group_pass_2d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & - clock) - type(group_pass_type), intent(inout) :: group !< The data type that store information for - !! group update. This data will be used in - !! do_group_pass. - real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! vector pair which is having its halos points - !! exchanged. - - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent - integer, optional, intent(in) :: direction !< An optional integer indicating which - !! directions the data should be sent. 
It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional - !! scalars discretized at the typical vector component locations. For example, TO_EAST sends - !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL - !! is the default if omitted. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - ! Local variables - integer :: stagger_local - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! Default value for type of grid - if (present(stagger)) stagger_local = stagger - - dirflag = To_All ! 60 - if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif - - if (mpp_group_update_initialized(group)) then - call mpp_reset_group_update_field(group,u_cmpt, v_cmpt) - elseif (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & - flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, & - shalo=halo, nhalo=halo) - else - call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & - flags=dirflag, gridtype=stagger_local) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine create_vector_group_pass_2d - -!> create_vector_group_pass_3d sets up a group of three-dimensional vector halo updates. 
-subroutine create_vector_group_pass_3d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & - clock) - type(group_pass_type), intent(inout) :: group !< The data type that store information for - !! group update. This data will be used in - !! do_group_pass. - real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector - !! pair which is having its halos points - !! exchanged. - real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the - !! vector pair which is having its halos points - !! exchanged. - - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: direction !< An optional integer indicating which - !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, - !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional - !! scalars discretized at the typical vector component locations. For example, TO_EAST sends - !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL - !! is the default if omitted. - integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, - !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are - !! discretized. Omitting stagger is the same as setting it to CGRID_NE. - integer, optional, intent(in) :: halo !< The size of the halo to update - the full - !! halo by default. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - - ! Local variables - integer :: stagger_local - integer :: dirflag - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - stagger_local = CGRID_NE ! 
Default value for type of grid - if (present(stagger)) stagger_local = stagger - - dirflag = To_All ! 60 - if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif - - if (mpp_group_update_initialized(group)) then - call mpp_reset_group_update_field(group,u_cmpt, v_cmpt) - elseif (present(halo) .and. MOM_dom%thin_halo_updates) then - call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & - flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, & - shalo=halo, nhalo=halo) - else - call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, & - flags=dirflag, gridtype=stagger_local) - endif - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine create_vector_group_pass_3d - -!> do_group_pass carries out a group halo update. -subroutine do_group_pass(group, MOM_dom, clock) - type(group_pass_type), intent(inout) :: group !< The data type that store information for - !! group update. This data will be used in - !! do_group_pass. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - real :: d_type - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - call mpp_do_group_update(group, MOM_dom%mpp_domain, d_type) - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine do_group_pass - -!> start_group_pass starts out a group halo update. -subroutine start_group_pass(group, MOM_dom, clock) - type(group_pass_type), intent(inout) :: group !< The data type that store information for - !! group update. This data will be used in - !! do_group_pass. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! 
needed to determine where data should be - !! sent. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - - real :: d_type - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - call mpp_start_group_update(group, MOM_dom%mpp_domain, d_type) - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine start_group_pass - -!> complete_group_pass completes a group halo update. -subroutine complete_group_pass(group, MOM_dom, clock) - type(group_pass_type), intent(inout) :: group !< The data type that store information for - !! group update. This data will be used in - !! do_group_pass. - type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain - !! needed to determine where data should be - !! sent. - integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be - !! started then stopped to time this routine. - real :: d_type - - if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif - - call mpp_complete_group_update(group, MOM_dom%mpp_domain, d_type) - - if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif - -end subroutine complete_group_pass - !> MOM_domains_init initializes a MOM_domain_type variable, based on the information !! read in from a param_file_type, and optionally returns data describing various' !! properties of the domain type. @@ -1189,12 +62,12 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & type(param_file_type), intent(in) :: param_file !< A structure to parse for !! run-time parameters logical, optional, intent(in) :: symmetric !< If present, this specifies - !! whether this domain is symmetric, regardless of - !! whether the macro SYMMETRIC_MEMORY_ is defined. + !! whether this domain is symmetric, regardless of + !! 
whether the macro SYMMETRIC_MEMORY_ is defined. logical, optional, intent(in) :: static_memory !< If present and true, this - !! domain type is set up for static memory and error - !! checking of various input values is performed against - !! those in the input file. + !! domain type is set up for static memory and + !! error checking of various input values is + !! performed against those in the input file. integer, optional, intent(in) :: NIHALO !< Default halo sizes, required !! with static memory. integer, optional, intent(in) :: NJHALO !< Default halo sizes, required @@ -1218,28 +91,26 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & !! layout-specific parameters. ! Local variables - integer, dimension(2) :: layout = (/ 1, 1 /) - integer, dimension(2) :: io_layout = (/ 0, 0 /) - integer, dimension(4) :: global_indices - !$ integer :: ocean_nthreads ! Number of Openmp threads - !$ logical :: ocean_omp_hyper_thread - integer, dimension(2) :: n_global ! The number of i- and j- points in the global computational domain. - integer, dimension(2) :: n_halo ! The number of i- and j- points in the halos. - integer :: nihalo_dflt, njhalo_dflt - integer :: pe, proc_used - logical, dimension(2) :: reentrant ! True if the x- and y- directions are periodic. - logical, dimension(2,2) :: tripolar ! A set of flag indicating whether there is tripolar - ! connectivity for any of the four logical edges of the grid. - ! Currently only tripolar_N is implemented. + integer, dimension(2) :: layout ! The number of logical processors in the i- and j- directions + integer, dimension(2) :: io_layout ! The layout of logical processors for input and output + !$ integer :: ocean_nthreads ! Number of openMP threads + !$ logical :: ocean_omp_hyper_thread ! If true use openMP hyper-threads + integer, dimension(2) :: n_global ! The number of i- and j- points in the global computational domain + integer, dimension(2) :: n_halo ! 
The number of i- and j- points in the halos + integer :: nihalo_dflt, njhalo_dflt ! The default halo sizes + integer :: PEs_used ! The number of processors used + logical, dimension(2) :: reentrant ! True if the x- and y- directions are periodic. + logical :: tripolar_N ! A flag indicating whether there is northern tripolar connectivity logical :: is_static ! If true, static memory is being used for this domain. - logical :: is_symmetric ! True if the domainn being set up will use symmetric memory. + logical :: is_symmetric ! True if the domain being set up will use symmetric memory. logical :: nonblocking ! If true, nonblocking halo updates will be used. logical :: thin_halos ! If true, If true, optional arguments may be used to specify the ! width of the halos that are updated with each call. - logical :: mask_table_exists - character(len=128) :: mask_table, inputdir - character(len=64) :: inc_nm - character(len=200) :: mesg + logical :: mask_table_exists ! True if there is a mask table file + character(len=128) :: inputdir ! The directory in which to find the diag table + character(len=200) :: mask_table ! The file name and later the full path to the diag table + character(len=64) :: inc_nm ! The name of the memory include file + character(len=200) :: mesg ! A string to use for error messages integer :: nip_parsed, njp_parsed character(len=8) :: char_xsiz, char_ysiz, char_niglobal, char_njglobal @@ -1249,8 +120,7 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & # include "version_variable.h" character(len=40) :: mdl ! This module's name. - pe = PE_here() - proc_used = num_PEs() + PEs_used = num_PEs() mdl = "MOM_domains" @@ -1295,13 +165,12 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & call get_param(param_file, mdl, "REENTRANT_Y", reentrant(2), & "If true, the domain is meridionally reentrant.", & default=.false.) - tripolar(1:2,1:2) = .false. 
- call get_param(param_file, mdl, "TRIPOLAR_N", tripolar(2,2), & + call get_param(param_file, mdl, "TRIPOLAR_N", tripolar_N, & "Use tripolar connectivity at the northern edge of the "//& "domain. With TRIPOLAR_N, NIGLOBAL must be even.", & default=.false.) -#ifndef NOT_SET_AFFINITY +# ifndef NOT_SET_AFFINITY !$ if (.not.MOM_thread_affinity_set()) then !$ call get_param(param_file, mdl, "OCEAN_OMP_THREADS", ocean_nthreads, & !$ "The number of OpenMP threads that MOM6 will use.", & @@ -1310,22 +179,24 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & !$ "If True, use hyper-threading.", default = .false., layoutParam=.true.) !$ call set_MOM_thread_affinity(ocean_nthreads, ocean_omp_hyper_thread) !$ endif -#endif +# endif call log_param(param_file, mdl, "!SYMMETRIC_MEMORY_", is_symmetric, & - "If defined, the velocity point data domain includes "//& - "every face of the thickness points. In other words, "//& - "some arrays are larger than others, depending on where "//& - "they are on the staggered grid. Also, the starting "//& + "If defined, the velocity point data domain includes every face of the "//& + "thickness points. In other words, some arrays are larger than others, "//& + "depending on where they are on the staggered grid. Also, the starting "//& "index of the velocity-point arrays is usually 0, not 1. "//& "This can only be set at compile time.",& layoutParam=.true.) call get_param(param_file, mdl, "NONBLOCKING_UPDATES", nonblocking, & "If true, non-blocking halo updates may be used.", & default=.false., layoutParam=.true.) + !### Note the duplicated "the the" in the following description, which should be fixed as a part + ! of a larger commit that also changes other MOM_parameter_doc file messages, but for now + ! reproduces the existing output files. 
call get_param(param_file, mdl, "THIN_HALO_UPDATES", thin_halos, & - "If true, optional arguments may be used to specify the "//& - "the width of the halos that are updated with each call.", & + "If true, optional arguments may be used to specify the the width of the "//& + "halos that are updated with each call.", & default=.true., layoutParam=.true.) nihalo_dflt = 4 ; njhalo_dflt = 4 @@ -1333,29 +204,25 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & if (present(NJHALO)) njhalo_dflt = NJHALO call log_param(param_file, mdl, "!STATIC_MEMORY_", is_static, & - "If STATIC_MEMORY_ is defined, the principle variables "//& - "will have sizes that are statically determined at "//& - "compile time. Otherwise the sizes are not determined "//& - "until run time. The STATIC option is substantially "//& - "faster, but does not allow the PE count to be changed "//& - "at run time. This can only be set at compile time.",& - layoutParam=.true.) + "If STATIC_MEMORY_ is defined, the principle variables will have sizes that "//& + "are statically determined at compile time. Otherwise the sizes are not "//& + "determined until run time. The STATIC option is substantially faster, but "//& + "does not allow the PE count to be changed at run time. This can only be "//& + "set at compile time.", layoutParam=.true.) if (is_static) then call get_param(param_file, mdl, "NIGLOBAL", n_global(1), & - "The total number of thickness grid points in the "//& - "x-direction in the physical domain. With STATIC_MEMORY_ "//& - "this is set in "//trim(inc_nm)//" at compile time.", & + "The total number of thickness grid points in the x-direction in the physical "//& + "domain. With STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.", & static_value=NIGLOBAL) call get_param(param_file, mdl, "NJGLOBAL", n_global(2), & - "The total number of thickness grid points in the "//& - "y-direction in the physical domain. 
With STATIC_MEMORY_ "//& - "this is set in "//trim(inc_nm)//" at compile time.", & + "The total number of thickness grid points in the y-direction in the physical "//& + "domain. With STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.", & static_value=NJGLOBAL) if (n_global(1) /= NIGLOBAL) call MOM_error(FATAL,"MOM_domains_init: " // & - "static mismatch for NIGLOBAL_ domain size. Header file does not match input namelist") + "static mismatch for NIGLOBAL_ domain size. Header file does not match input namelist") if (n_global(2) /= NJGLOBAL) call MOM_error(FATAL,"MOM_domains_init: " // & - "static mismatch for NJGLOBAL_ domain size. Header file does not match input namelist") + "static mismatch for NJGLOBAL_ domain size. Header file does not match input namelist") ! Check the requirement of equal sized compute domains when STATIC_MEMORY_ is used. if ((MOD(NIGLOBAL, NIPROC) /= 0) .OR. (MOD(NJGLOBAL, NJPROC) /= 0)) then @@ -1363,22 +230,20 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & write( char_ysiz, '(i4)' ) NJPROC write( char_niglobal, '(i4)' ) NIGLOBAL write( char_njglobal, '(i4)' ) NJGLOBAL - call MOM_error(WARNING, 'MOM_domains: Processor decomposition (NIPROC_,NJPROC_) = (' & - //trim(char_xsiz)//','//trim(char_ysiz)//') does not evenly divide size '//& - 'set by preprocessor macro ('//trim(char_niglobal)//','//trim(char_njglobal)//').') + call MOM_error(WARNING, 'MOM_domains: Processor decomposition (NIPROC_,NJPROC_) = ('//& + trim(char_xsiz)//','//trim(char_ysiz)//') does not evenly divide size '//& + 'set by preprocessor macro ('//trim(char_niglobal)//','//trim(char_njglobal)//').') call MOM_error(FATAL,'MOM_domains: #undef STATIC_MEMORY_ in '//trim(inc_nm)//' to use '//& - 'dynamic allocation, or change processor decomposition to evenly divide the domain.') + 'dynamic allocation, or change processor decomposition to evenly divide the domain.') endif else call get_param(param_file, mdl, "NIGLOBAL", 
n_global(1), & - "The total number of thickness grid points in the "//& - "x-direction in the physical domain. With STATIC_MEMORY_ "//& - "this is set in "//trim(inc_nm)//" at compile time.", & + "The total number of thickness grid points in the x-direction in the physical "//& + "domain. With STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.", & fail_if_missing=.true.) call get_param(param_file, mdl, "NJGLOBAL", n_global(2), & - "The total number of thickness grid points in the "//& - "y-direction in the physical domain. With STATIC_MEMORY_ "//& - "this is set in "//trim(inc_nm)//" at compile time.", & + "The total number of thickness grid points in the y-direction in the physical "//& + "domain. With STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.", & fail_if_missing=.true.) endif @@ -1406,41 +271,32 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & "static mismatch for "//trim(njhalo_nm)//" domain size") endif - global_indices(1) = 1 ; global_indices(2) = n_global(1) - global_indices(3) = 1 ; global_indices(4) = n_global(2) - call get_param(param_file, mdl, "INPUTDIR", inputdir, do_not_log=.true., default=".") inputdir = slasher(inputdir) call get_param(param_file, mdl, trim(masktable_nm), mask_table, & - "A text file to specify n_mask, layout and mask_list. "//& - "This feature masks out processors that contain only land points. "//& - "The first line of mask_table is the number of regions to be masked out. "//& - "The second line is the layout of the model and must be "//& - "consistent with the actual model layout. "//& - "The following (n_mask) lines give the logical positions "//& - "of the processors that are masked out. The mask_table "//& - "can be created by tools like check_mask. 
The "//& - "following example of mask_table masks out 2 processors, "//& - "(1,2) and (3,6), out of the 24 in a 4x6 layout: \n"//& - " 2\n 4,6\n 1,2\n 3,6\n", default="MOM_mask_table", & + "A text file to specify n_mask, layout and mask_list. This feature masks out "//& + "processors that contain only land points. The first line of mask_table is the "//& + "number of regions to be masked out. The second line is the layout of the "//& + "model and must be consistent with the actual model layout. The following "//& + "(n_mask) lines give the logical positions of the processors that are masked "//& + "out. The mask_table can be created by tools like check_mask. The following "//& + "example of mask_table masks out 2 processors, (1,2) and (3,6), out of the 24 "//& + "in a 4x6 layout: \n 2\n 4,6\n 1,2\n 3,6\n", default="MOM_mask_table", & layoutParam=.true.) mask_table = trim(inputdir)//trim(mask_table) - mask_table_exists = file_exist(mask_table) + mask_table_exists = file_exists(mask_table) if (is_static) then layout(1) = NIPROC ; layout(2) = NJPROC else call get_param(param_file, mdl, trim(layout_nm), layout, & - "The processor layout to be used, or 0, 0 to automatically "//& - "set the layout based on the number of processors.", default=0, & - do_not_log=.true.) + "The processor layout to be used, or 0, 0 to automatically set the layout "//& + "based on the number of processors.", default=0, do_not_log=.true.) call get_param(param_file, mdl, trim(niproc_nm), nip_parsed, & - "The number of processors in the x-direction.", default=-1, & - do_not_log=.true.) + "The number of processors in the x-direction.", default=-1, do_not_log=.true.) call get_param(param_file, mdl, trim(njproc_nm), njp_parsed, & - "The number of processors in the y-direction.", default=-1, & - do_not_log=.true.) + "The number of processors in the y-direction.", default=-1, do_not_log=.true.) if (nip_parsed > -1) then if ((layout(1) > 0) .and. 
(layout(1) /= nip_parsed)) & call MOM_error(FATAL, trim(layout_nm)//" and "//trim(niproc_nm)//" set inconsistently. "//& @@ -1458,32 +314,29 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & "Shift to using "//trim(layout_nm)//" instead.") endif - if ( layout(1)==0 .and. layout(2)==0 ) & - call MOM_define_layout(global_indices, proc_used, layout) - if ( layout(1)/=0 .and. layout(2)==0 ) layout(2) = proc_used/layout(1) - if ( layout(1)==0 .and. layout(2)/=0 ) layout(1) = proc_used/layout(2) + if ( (layout(1) == 0) .and. (layout(2) == 0) ) & + call MOM_define_layout( (/ 1, n_global(1), 1, n_global(2) /), PEs_used, layout) + if ( (layout(1) /= 0) .and. (layout(2) == 0) ) layout(2) = PEs_used / layout(1) + if ( (layout(1) == 0) .and. (layout(2) /= 0) ) layout(1) = PEs_used / layout(2) - if (layout(1)*layout(2) /= proc_used .and. (.not. mask_table_exists) ) then + if (layout(1)*layout(2) /= PEs_used .and. (.not. mask_table_exists) ) then write(mesg,'("MOM_domains_init: The product of the two components of layout, ", & & 2i4,", is not the number of PEs used, ",i5,".")') & - layout(1),layout(2),proc_used + layout(1), layout(2), PEs_used call MOM_error(FATAL, mesg) endif endif call log_param(param_file, mdl, trim(niproc_nm), layout(1), & - "The number of processors in the x-direction. With "//& - "STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.",& - layoutParam=.true.) + "The number of processors in the x-direction. With STATIC_MEMORY_ this "//& + "is set in "//trim(inc_nm)//" at compile time.", layoutParam=.true.) call log_param(param_file, mdl, trim(njproc_nm), layout(2), & - "The number of processors in the y-direction. With "//& - "STATIC_MEMORY_ this is set in "//trim(inc_nm)//" at compile time.",& - layoutParam=.true.) + "The number of processors in the y-direction. With STATIC_MEMORY_ this "//& + "is set in "//trim(inc_nm)//" at compile time.", layoutParam=.true.) 
call log_param(param_file, mdl, trim(layout_nm), layout, & - "The processor layout that was actually used.",& - layoutParam=.true.) + "The processor layout that was actually used.", layoutParam=.true.) ! Idiot check that fewer PEs than columns have been requested - if (layout(1)*layout(2)>n_global(1)*n_global(2)) then + if (layout(1)*layout(2) > n_global(1)*n_global(2)) then write(mesg,'(a,2(i5,x,a))') 'You requested to use',layout(1)*layout(2), & 'PEs but there are only', n_global(1)*n_global(2), 'columns in the model' call MOM_error(FATAL, mesg) @@ -1496,635 +349,13 @@ subroutine MOM_domains_init(MOM_dom, param_file, symmetric, static_memory, & ! PEs in each direction. io_layout(:) = (/ 1, 1 /) call get_param(param_file, mdl, trim(io_layout_nm), io_layout, & - "The processor layout to be used, or 0,0 to automatically "//& - "set the io_layout to be the same as the layout.", default=1, & - layoutParam=.true.) + "The processor layout to be used, or 0,0 to automatically set the io_layout "//& + "to be the same as the layout.", default=1, layoutParam=.true.) - call create_MOM_domain(MOM_dom, n_global, n_halo, reentrant, tripolar, layout, io_layout=io_layout, & - domain_name=domain_name, mask_table=mask_table, symmetric=symmetric, & - thin_halos=thin_halos, nonblocking=nonblocking) + call create_MOM_domain(MOM_dom, n_global, n_halo, reentrant, tripolar_N, layout, & + io_layout=io_layout, domain_name=domain_name, mask_table=mask_table, & + symmetric=symmetric, thin_halos=thin_halos, nonblocking=nonblocking) end subroutine MOM_domains_init -!> create_MOM_domain creates and initializes a MOM_domain_type variables, based on the information -!! provided in arguments. -subroutine create_MOM_domain(MOM_dom, n_global, n_halo, reentrant, tripolar, layout, io_layout, & - domain_name, mask_table, symmetric, thin_halos, nonblocking) - type(MOM_domain_type), pointer :: MOM_dom !< A pointer to the MOM_domain_type being defined here. 
- integer, dimension(2), intent(in) :: n_global !< The number of points on the global grid in - !! the i- and j-directions - integer, dimension(2), intent(in) :: n_halo !< The number of halo points on each processor - logical, dimension(2), intent(in) :: reentrant !< If true the grid is periodic in the i- and j- directions - logical, dimension(2,2), intent(in) :: tripolar !< If true the grid uses tripolar connectivity on the two - !! ends (first index) of the i- and j-grids (second index) - integer, dimension(2), intent(in) :: layout !< The layout of logical PEs in the i- and j-directions. - integer, dimension(2), optional, intent(in) :: io_layout !< The layout for parallel input and output. - character(len=*), optional, intent(in) :: domain_name !< A name for this domain, "MOM" if missing. - character(len=*), optional, intent(in) :: mask_table !< The full relative or absolute path to the mask table. - logical, optional, intent(in) :: symmetric !< If present, this specifies whether this domain - !! uses symmetric memory, or true if missing. - logical, optional, intent(in) :: thin_halos !< If present, this specifies whether to permit the use of - !! thin halo updates, or true if missing. - logical, optional, intent(in) :: nonblocking !< If present, this specifies whether to permit the use of - !! nonblocking halo updates, or false if missing. - - ! local variables - integer, dimension(4) :: global_indices ! The lower and upper global i- and j-index bounds - integer :: X_FLAGS ! A combination of integers encoding the x-direction grid connectivity. - integer :: Y_FLAGS ! A combination of integers encoding the y-direction grid connectivity. - integer :: xhalo_d2, yhalo_d2 - character(len=200) :: mesg ! A string for use in error messages - character(len=64) :: dom_name ! The domain name - logical :: mask_table_exists ! 
Mask_table is present and the file it points to exists - - if (.not.associated(MOM_dom)) then - allocate(MOM_dom) - allocate(MOM_dom%mpp_domain) - allocate(MOM_dom%mpp_domain_d2) - endif - - dom_name = "MOM" ; if (present(domain_name)) dom_name = trim(domain_name) - - X_FLAGS = 0 ; Y_FLAGS = 0 - if (reentrant(1)) X_FLAGS = CYCLIC_GLOBAL_DOMAIN - if (reentrant(2)) Y_FLAGS = CYCLIC_GLOBAL_DOMAIN - if (tripolar(2,2)) then - Y_FLAGS = FOLD_NORTH_EDGE - if (reentrant(2)) call MOM_error(FATAL,"MOM_domains: "// & - "TRIPOLAR_N and REENTRANT_Y may not be used together.") - endif - - MOM_dom%nonblocking_updates = nonblocking - MOM_dom%thin_halo_updates = thin_halos - MOM_dom%symmetric = .true. ; if (present(symmetric)) MOM_dom%symmetric = symmetric - MOM_dom%niglobal = n_global(1) ; MOM_dom%njglobal = n_global(2) - MOM_dom%nihalo = n_halo(1) ; MOM_dom%njhalo = n_halo(2) - - ! Save the extra data for creating other domains of different resolution that overlay this domain. - MOM_dom%X_FLAGS = X_FLAGS - MOM_dom%Y_FLAGS = Y_FLAGS - MOM_dom%layout(:) = layout(:) - - ! Set up the io_layout, with error handling. 
- MOM_dom%io_layout(:) = (/ 1, 1 /) - if (present(io_layout)) then - if (io_layout(1) == 0) then - MOM_dom%io_layout(1) = layout(1) - elseif (io_layout(1) > 1) then - MOM_dom%io_layout(1) = io_layout(1) - if (modulo(layout(1), io_layout(1)) /= 0) then - write(mesg,'("MOM_domains_init: The i-direction I/O-layout, IO_LAYOUT(1)=",i4, & - &", does not evenly divide the i-direction layout, NIPROC=,",i4,".")') io_layout(1), layout(1) - call MOM_error(FATAL, mesg) - endif - endif - - if (io_layout(2) == 0) then - MOM_dom%io_layout(2) = layout(2) - elseif (io_layout(2) > 1) then - MOM_dom%io_layout(2) = io_layout(2) - if (modulo(layout(2), io_layout(2)) /= 0) then - write(mesg,'("MOM_domains_init: The j-direction I/O-layout, IO_LAYOUT(2)=",i4, & - &", does not evenly divide the j-direction layout, NJPROC=,",i4,".")') io_layout(2), layout(2) - call MOM_error(FATAL, mesg) - endif - endif - endif - - global_indices(1:4) = (/ 1, MOM_dom%niglobal, 1, MOM_dom%njglobal /) - - if (present(mask_table)) then - mask_table_exists = file_exist(mask_table) - if (mask_table_exists) then - allocate(MOM_dom%maskmap(layout(1), layout(2))) - call parse_mask_table(mask_table, MOM_dom%maskmap, dom_name) - endif - else - mask_table_exists = .false. - endif - - if (mask_table_exists) then - call MOM_define_domain( global_indices, layout, MOM_dom%mpp_domain, & - xflags=X_FLAGS, yflags=Y_FLAGS, & - xhalo=MOM_dom%nihalo, yhalo=MOM_dom%njhalo, & - symmetry = MOM_dom%symmetric, name=dom_name, & - maskmap=MOM_dom%maskmap ) - else - call MOM_define_domain( global_indices, layout, MOM_dom%mpp_domain, & - xflags=X_FLAGS, yflags=Y_FLAGS, & - xhalo=MOM_dom%nihalo, yhalo=MOM_dom%njhalo, & - symmetry = MOM_dom%symmetric, name=dom_name) - endif - - if ((MOM_dom%io_layout(1) > 0) .and. (MOM_dom%io_layout(2) > 0) .and. (layout(1)*layout(2) > 1)) then - call MOM_define_io_domain(MOM_dom%mpp_domain, MOM_dom%io_layout) - endif - - !For downsampled domain, recommend a halo of 1 (or 0?) 
since we're not doing wide-stencil computations. - !But that does not work because the downsampled field would not have the correct size to pass the checks, e.g., we get - !error: downsample_diag_indices_get: peculiar size 28 in i-direction\ndoes not match one of 24 25 26 27 - xhalo_d2 = int(MOM_dom%nihalo/2) - yhalo_d2 = int(MOM_dom%njhalo/2) - global_indices(1:4) = (/ 1, int(MOM_dom%niglobal/2), 1, int(MOM_dom%njglobal/2) /) - if (mask_table_exists) then - call MOM_define_domain( global_indices, layout, MOM_dom%mpp_domain_d2, & - xflags=X_FLAGS, yflags=Y_FLAGS, & - xhalo=xhalo_d2, yhalo=yhalo_d2, & - symmetry = MOM_dom%symmetric, name=trim("MOMc"), & - maskmap=MOM_dom%maskmap ) - else - call MOM_define_domain( global_indices, layout, MOM_dom%mpp_domain_d2, & - xflags=X_FLAGS, yflags=Y_FLAGS, & - xhalo=xhalo_d2, yhalo=yhalo_d2, & - symmetry = MOM_dom%symmetric, name=trim("MOMc")) - endif - - if ((MOM_dom%io_layout(1) > 0) .and. (MOM_dom%io_layout(2) > 0) .and. & - (layout(1)*layout(2) > 1)) then - call MOM_define_io_domain(MOM_dom%mpp_domain_d2, MOM_dom%io_layout) - endif - -end subroutine create_MOM_domain - -!> dealloc_MOM_domain deallocates memory associated with a pointer to a MOM_domain_type -!! and all of its contents -subroutine deallocate_MOM_domain(MOM_domain, cursory) - type(MOM_domain_type), pointer :: MOM_domain !< A pointer to the MOM_domain_type being deallocated - logical, optional, intent(in) :: cursory !< If true do not deallocate fields associated - !! with the underlying infrastructure - - if (associated(MOM_domain)) then - call deallocate_domain_contents(MOM_domain, cursory) - deallocate(MOM_domain) - endif - -end subroutine deallocate_MOM_domain - -!> deallocate_domain_contents deallocates memory associated with pointers -!! inside of a MOM_domain_type. 
-subroutine deallocate_domain_contents(MOM_domain, cursory) - type(MOM_domain_type), intent(inout) :: MOM_domain !< A MOM_domain_type whose contents will be deallocated - logical, optional, intent(in) :: cursory !< If true do not deallocate fields associated - !! with the underlying infrastructure - - logical :: invasive ! If true, deallocate fields associated with the underlying infrastructure - - invasive = .true. ; if (present(cursory)) invasive = .not.cursory - - if (associated(MOM_domain%mpp_domain)) then - if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain) - deallocate(MOM_domain%mpp_domain) - endif - if (associated(MOM_domain%mpp_domain_d2)) then - if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain_d2) - deallocate(MOM_domain%mpp_domain_d2) - endif - if (associated(MOM_domain%maskmap)) deallocate(MOM_domain%maskmap) - -end subroutine deallocate_domain_contents - -!> MOM_thread_affinity_set returns true if the number of openMP threads have been set to a value greater than 1. -function MOM_thread_affinity_set() - ! Local variables - !$ integer :: ocean_nthreads ! Number of openMP threads - !$ integer :: omp_get_num_threads ! An openMP function that returns the number of threads - logical :: MOM_thread_affinity_set - - MOM_thread_affinity_set = .false. - !$ call fms_affinity_init() - !$OMP PARALLEL - !$OMP MASTER - !$ ocean_nthreads = omp_get_num_threads() - !$OMP END MASTER - !$OMP END PARALLEL - !$ MOM_thread_affinity_set = (ocean_nthreads > 1 ) -end function MOM_thread_affinity_set - -!> set_MOM_thread_affinity sest the number of openMP threads to use with the ocean. -subroutine set_MOM_thread_affinity(ocean_nthreads, ocean_hyper_thread) - integer, intent(in) :: ocean_nthreads !< Number of openMP threads to use for the ocean model - logical, intent(in) :: ocean_hyper_thread !< If true, use hyper threading - - ! 
Local variables - !$ integer :: omp_get_thread_num, omp_get_num_threads !< These are the results of openMP functions - - !$ call fms_affinity_set('OCEAN', ocean_hyper_thread, ocean_nthreads) - !$ call omp_set_num_threads(ocean_nthreads) - !$ write(6,*) "MOM_domains_mod OMPthreading ", fms_affinity_get(), omp_get_thread_num(), omp_get_num_threads() - !$ flush(6) -end subroutine set_MOM_thread_affinity - -!> This subroutine retrieves the 1-d domains that make up the 2d-domain in a MOM_domain -subroutine get_domain_components_MD(MOM_dom, x_domain, y_domain) - type(MOM_domain_type), intent(in) :: MOM_dom !< The MOM_domain whose contents are being extracted - type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain - type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain - - call mpp_get_domain_components(MOM_dom%mpp_domain, x_domain, y_domain) -end subroutine get_domain_components_MD - -!> This subroutine retrieves the 1-d domains that make up a 2d-domain -subroutine get_domain_components_d2D(domain, x_domain, y_domain) - type(domain2D), intent(in) :: domain !< The 2D domain whose contents are being extracted - type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain - type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain - - call mpp_get_domain_components(domain, x_domain, y_domain) -end subroutine get_domain_components_d2D - -!> clone_MD_to_MD copies one MOM_domain_type into another, while allowing -!! some properties of the new type to differ from the original one. -subroutine clone_MD_to_MD(MD_in, MOM_dom, min_halo, halo_size, symmetric, & - domain_name, turns) - type(MOM_domain_type), intent(in) :: MD_in !< An existing MOM_domain - type(MOM_domain_type), pointer :: MOM_dom !< A pointer to a MOM_domain that will be - !! allocated if it is unassociated, and will have data - !! 
copied from MD_in - integer, dimension(2), & - optional, intent(inout) :: min_halo !< If present, this sets the - !! minimum halo size for this domain in the i- and j- - !! directions, and returns the actual halo size used. - integer, optional, intent(in) :: halo_size !< If present, this sets the halo - !! size for the domain in the i- and j-directions. - !! min_halo and halo_size can not both be present. - logical, optional, intent(in) :: symmetric !< If present, this specifies - !! whether the new domain is symmetric, regardless of - !! whether the macro SYMMETRIC_MEMORY_ is defined. - character(len=*), & - optional, intent(in) :: domain_name !< A name for the new domain, "MOM" - !! if missing. - integer, optional, intent(in) :: turns !< Number of quarter turns - - integer :: global_indices(4) - logical :: mask_table_exists - character(len=64) :: dom_name - integer :: qturns - - qturns = 0 - if (present(turns)) qturns = turns - - if (.not.associated(MOM_dom)) then - allocate(MOM_dom) - allocate(MOM_dom%mpp_domain) - allocate(MOM_dom%mpp_domain_d2) - endif - -! 
Save the extra data for creating other domains of different resolution that overlay this domain - MOM_dom%symmetric = MD_in%symmetric - MOM_dom%nonblocking_updates = MD_in%nonblocking_updates - MOM_dom%thin_halo_updates = MD_in%thin_halo_updates - - if (modulo(qturns, 2) /= 0) then - MOM_dom%niglobal = MD_in%njglobal ; MOM_dom%njglobal = MD_in%niglobal - MOM_dom%nihalo = MD_in%njhalo ; MOM_dom%njhalo = MD_in%nihalo - - MOM_dom%X_FLAGS = MD_in%Y_FLAGS ; MOM_dom%Y_FLAGS = MD_in%X_FLAGS - MOM_dom%layout(:) = MD_in%layout(2:1:-1) - MOM_dom%io_layout(:) = MD_in%io_layout(2:1:-1) - else - MOM_dom%niglobal = MD_in%niglobal ; MOM_dom%njglobal = MD_in%njglobal - MOM_dom%nihalo = MD_in%nihalo ; MOM_dom%njhalo = MD_in%njhalo - - MOM_dom%X_FLAGS = MD_in%X_FLAGS ; MOM_dom%Y_FLAGS = MD_in%Y_FLAGS - MOM_dom%layout(:) = MD_in%layout(:) - MOM_dom%io_layout(:) = MD_in%io_layout(:) - endif - - global_indices(1) = 1 ; global_indices(2) = MOM_dom%niglobal - global_indices(3) = 1 ; global_indices(4) = MOM_dom%njglobal - - if (associated(MD_in%maskmap)) then - mask_table_exists = .true. - allocate(MOM_dom%maskmap(MOM_dom%layout(1), MOM_dom%layout(2))) - if (qturns /= 0) then - call rotate_array(MD_in%maskmap(:,:), qturns, MOM_dom%maskmap(:,:)) - else - MOM_dom%maskmap(:,:) = MD_in%maskmap(:,:) - endif - else - mask_table_exists = .false. - endif - - if (present(halo_size) .and. 
present(min_halo)) call MOM_error(FATAL, & - "clone_MOM_domain can not have both halo_size and min_halo present.") - - if (present(min_halo)) then - MOM_dom%nihalo = max(MOM_dom%nihalo, min_halo(1)) - min_halo(1) = MOM_dom%nihalo - MOM_dom%njhalo = max(MOM_dom%njhalo, min_halo(2)) - min_halo(2) = MOM_dom%njhalo - endif - - if (present(halo_size)) then - MOM_dom%nihalo = halo_size ; MOM_dom%njhalo = halo_size - endif - - if (present(symmetric)) then ; MOM_dom%symmetric = symmetric ; endif - - dom_name = "MOM" - if (present(domain_name)) dom_name = trim(domain_name) - - if (mask_table_exists) then - call MOM_define_domain(global_indices, MOM_dom%layout, MOM_dom%mpp_domain, & - xflags=MOM_dom%X_FLAGS, yflags=MOM_dom%Y_FLAGS, & - xhalo=MOM_dom%nihalo, yhalo=MOM_dom%njhalo, & - symmetry=MOM_dom%symmetric, name=dom_name, & - maskmap=MOM_dom%maskmap) - - global_indices(2) = global_indices(2) / 2 - global_indices(4) = global_indices(4) / 2 - call MOM_define_domain(global_indices, MOM_dom%layout, & - MOM_dom%mpp_domain_d2, & - xflags=MOM_dom%X_FLAGS, yflags=MOM_dom%Y_FLAGS, & - xhalo=(MOM_dom%nihalo/2), yhalo=(MOM_dom%njhalo/2), & - symmetry=MOM_dom%symmetric, name=dom_name, & - maskmap=MOM_dom%maskmap) - else - call MOM_define_domain(global_indices, MOM_dom%layout, MOM_dom%mpp_domain, & - xflags=MOM_dom%X_FLAGS, yflags=MOM_dom%Y_FLAGS, & - xhalo=MOM_dom%nihalo, yhalo=MOM_dom%njhalo, & - symmetry=MOM_dom%symmetric, name=dom_name) - - global_indices(2) = global_indices(2) / 2 - global_indices(4) = global_indices(4) / 2 - call MOM_define_domain(global_indices, MOM_dom%layout, & - MOM_dom%mpp_domain_d2, & - xflags=MOM_dom%X_FLAGS, yflags=MOM_dom%Y_FLAGS, & - xhalo=(MOM_dom%nihalo/2), yhalo=(MOM_dom%njhalo/2), & - symmetry=MOM_dom%symmetric, name=dom_name) - endif - - if ((MOM_dom%io_layout(1) + MOM_dom%io_layout(2) > 0) .and. 
& - (MOM_dom%layout(1)*MOM_dom%layout(2) > 1)) then - call MOM_define_io_domain(MOM_dom%mpp_domain, MOM_dom%io_layout) - endif - -end subroutine clone_MD_to_MD - -!> clone_MD_to_d2D uses information from a MOM_domain_type to create a new -!! domain2d type, while allowing some properties of the new type to differ from -!! the original one. -subroutine clone_MD_to_d2D(MD_in, mpp_domain, min_halo, halo_size, symmetric, & - domain_name, turns) - type(MOM_domain_type), intent(in) :: MD_in !< An existing MOM_domain to be cloned - type(domain2d), intent(inout) :: mpp_domain !< The new mpp_domain to be set up - integer, dimension(2), & - optional, intent(inout) :: min_halo !< If present, this sets the - !! minimum halo size for this domain in the i- and j- - !! directions, and returns the actual halo size used. - integer, optional, intent(in) :: halo_size !< If present, this sets the halo - !! size for the domain in the i- and j-directions. - !! min_halo and halo_size can not both be present. - logical, optional, intent(in) :: symmetric !< If present, this specifies - !! whether the new domain is symmetric, regardless of - !! whether the macro SYMMETRIC_MEMORY_ is defined. - character(len=*), & - optional, intent(in) :: domain_name !< A name for the new domain, "MOM" - !! if missing. - integer, optional, intent(in) :: turns !< If true, swap X and Y axes - - integer :: global_indices(4), layout(2), io_layout(2) - integer :: X_FLAGS, Y_FLAGS, niglobal, njglobal, nihalo, njhalo - logical :: symmetric_dom - character(len=64) :: dom_name - - if (present(turns)) & - call MOM_error(FATAL, "Rotation not supported for MOM_domain to domain2d") - -! 
Save the extra data for creating other domains of different resolution that overlay this domain - niglobal = MD_in%niglobal ; njglobal = MD_in%njglobal - nihalo = MD_in%nihalo ; njhalo = MD_in%njhalo - - symmetric_dom = MD_in%symmetric - - X_FLAGS = MD_in%X_FLAGS ; Y_FLAGS = MD_in%Y_FLAGS - layout(:) = MD_in%layout(:) ; io_layout(:) = MD_in%io_layout(:) - - if (present(halo_size) .and. present(min_halo)) call MOM_error(FATAL, & - "clone_MOM_domain can not have both halo_size and min_halo present.") - - if (present(min_halo)) then - nihalo = max(nihalo, min_halo(1)) - njhalo = max(njhalo, min_halo(2)) - min_halo(1) = nihalo ; min_halo(2) = njhalo - endif - - if (present(halo_size)) then - nihalo = halo_size ; njhalo = halo_size - endif - - if (present(symmetric)) then ; symmetric_dom = symmetric ; endif - - dom_name = "MOM" - if (present(domain_name)) dom_name = trim(domain_name) - - global_indices(1) = 1 ; global_indices(2) = niglobal - global_indices(3) = 1 ; global_indices(4) = njglobal - if (associated(MD_in%maskmap)) then - call MOM_define_domain( global_indices, layout, mpp_domain, & - xflags=X_FLAGS, yflags=Y_FLAGS, & - xhalo=nihalo, yhalo=njhalo, & - symmetry = symmetric, name=dom_name, & - maskmap=MD_in%maskmap ) - else - call MOM_define_domain( global_indices, layout, mpp_domain, & - xflags=X_FLAGS, yflags=Y_FLAGS, & - xhalo=nihalo, yhalo=njhalo, & - symmetry = symmetric, name=dom_name) - endif - - if ((io_layout(1) + io_layout(2) > 0) .and. 
& - (layout(1)*layout(2) > 1)) then - call MOM_define_io_domain(mpp_domain, io_layout) - endif - -end subroutine clone_MD_to_d2D - -!> Returns various data that has been stored in a MOM_domain_type -subroutine get_domain_extent(Domain, isc, iec, jsc, jec, isd, ied, jsd, jed, & - isg, ieg, jsg, jeg, idg_offset, jdg_offset, & - symmetric, local_indexing, index_offset) - type(MOM_domain_type), & - intent(in) :: Domain !< The MOM domain from which to extract information - integer, intent(out) :: isc !< The start i-index of the computational domain - integer, intent(out) :: iec !< The end i-index of the computational domain - integer, intent(out) :: jsc !< The start j-index of the computational domain - integer, intent(out) :: jec !< The end j-index of the computational domain - integer, intent(out) :: isd !< The start i-index of the data domain - integer, intent(out) :: ied !< The end i-index of the data domain - integer, intent(out) :: jsd !< The start j-index of the data domain - integer, intent(out) :: jed !< The end j-index of the data domain - integer, intent(out) :: isg !< The start i-index of the global domain - integer, intent(out) :: ieg !< The end i-index of the global domain - integer, intent(out) :: jsg !< The start j-index of the global domain - integer, intent(out) :: jeg !< The end j-index of the global domain - integer, intent(out) :: idg_offset !< The offset between the corresponding global and - !! data i-index spaces. - integer, intent(out) :: jdg_offset !< The offset between the corresponding global and - !! data j-index spaces. - logical, intent(out) :: symmetric !< True if symmetric memory is used. - logical, optional, intent(in) :: local_indexing !< If true, local tracer array indices start at 1, - !! as in most MOM6 code. - integer, optional, intent(in) :: index_offset !< A fixed additional offset to all indices. This - !! can be useful for some types of debugging with - !! dynamic memory allocation. - ! 
Local variables - integer :: ind_off - logical :: local - - local = .true. ; if (present(local_indexing)) local = local_indexing - ind_off = 0 ; if (present(index_offset)) ind_off = index_offset - - call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) - call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) - call mpp_get_global_domain(Domain%mpp_domain, isg, ieg, jsg, jeg) - - ! This code institutes the MOM convention that local array indices start at 1. - if (local) then - idg_offset = isd-1 ; jdg_offset = jsd-1 - isc = isc-isd+1 ; iec = iec-isd+1 ; jsc = jsc-jsd+1 ; jec = jec-jsd+1 - ied = ied-isd+1 ; jed = jed-jsd+1 - isd = 1 ; jsd = 1 - else - idg_offset = 0 ; jdg_offset = 0 - endif - if (ind_off /= 0) then - idg_offset = idg_offset + ind_off ; jdg_offset = jdg_offset + ind_off - isc = isc + ind_off ; iec = iec + ind_off - jsc = jsc + ind_off ; jec = jec + ind_off - isd = isd + ind_off ; ied = ied + ind_off - jsd = jsd + ind_off ; jed = jed + ind_off - endif - symmetric = Domain%symmetric - -end subroutine get_domain_extent - -subroutine get_domain_extent_dsamp2(Domain, isc_d2, iec_d2, jsc_d2, jec_d2,& - isd_d2, ied_d2, jsd_d2, jed_d2,& - isg_d2, ieg_d2, jsg_d2, jeg_d2) - type(MOM_domain_type), & - intent(in) :: Domain !< The MOM domain from which to extract information - integer, intent(out) :: isc_d2 !< The start i-index of the computational domain - integer, intent(out) :: iec_d2 !< The end i-index of the computational domain - integer, intent(out) :: jsc_d2 !< The start j-index of the computational domain - integer, intent(out) :: jec_d2 !< The end j-index of the computational domain - integer, intent(out) :: isd_d2 !< The start i-index of the data domain - integer, intent(out) :: ied_d2 !< The end i-index of the data domain - integer, intent(out) :: jsd_d2 !< The start j-index of the data domain - integer, intent(out) :: jed_d2 !< The end j-index of the data domain - integer, intent(out) :: isg_d2 !< The start i-index of the global 
domain - integer, intent(out) :: ieg_d2 !< The end i-index of the global domain - integer, intent(out) :: jsg_d2 !< The start j-index of the global domain - integer, intent(out) :: jeg_d2 !< The end j-index of the global domain - - call mpp_get_compute_domain(Domain%mpp_domain_d2, isc_d2, iec_d2, jsc_d2, jec_d2) - call mpp_get_data_domain(Domain%mpp_domain_d2, isd_d2, ied_d2, jsd_d2, jed_d2) - call mpp_get_global_domain (Domain%mpp_domain_d2, isg_d2, ieg_d2, jsg_d2, jeg_d2) - ! This code institutes the MOM convention that local array indices start at 1. - isc_d2 = isc_d2-isd_d2+1 ; iec_d2 = iec_d2-isd_d2+1 - jsc_d2 = jsc_d2-jsd_d2+1 ; jec_d2 = jec_d2-jsd_d2+1 - ied_d2 = ied_d2-isd_d2+1 ; jed_d2 = jed_d2-jsd_d2+1 - isd_d2 = 1 ; jsd_d2 = 1 -end subroutine get_domain_extent_dsamp2 - -!> Return the (potentially symmetric) computational domain i-bounds for an array -!! passed without index specifications (i.e. indices start at 1) based on an array size. -subroutine get_simple_array_i_ind(domain, size, is, ie, symmetric) - type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information - integer, intent(in) :: size !< The i-array size - integer, intent(out) :: is !< The computational domain starting i-index. - integer, intent(out) :: ie !< The computational domain ending i-index. - logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes - !! can be considered. - ! Local variables - logical :: sym - character(len=120) :: mesg, mesg2 - integer :: isc, iec, jsc, jec, isd, ied, jsd, jed - - call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) - call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) - - isc = isc-isd+1 ; iec = iec-isd+1 ; ied = ied-isd+1 ; isd = 1 - sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric - - if (size == ied) then ; is = isc ; ie = iec - elseif (size == 1+iec-isc) then ; is = 1 ; ie = size - elseif (sym .and. 
(size == 1+ied)) then ; is = isc ; ie = iec+1 - elseif (sym .and. (size == 2+iec-isc)) then ; is = 1 ; ie = size+1 - else - write(mesg,'("Unrecognized size ", i6, "in call to get_simple_array_i_ind. \")') size - if (sym) then - write(mesg2,'("Valid sizes are : ", 2i7)') ied, 1+iec-isc - else - write(mesg2,'("Valid sizes are : ", 4i7)') ied, 1+iec-isc, 1+ied, 2+iec-isc - endif - call MOM_error(FATAL, trim(mesg)//trim(mesg2)) - endif - -end subroutine get_simple_array_i_ind - - -!> Return the (potentially symmetric) computational domain j-bounds for an array -!! passed without index specifications (i.e. indices start at 1) based on an array size. -subroutine get_simple_array_j_ind(domain, size, js, je, symmetric) - type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information - integer, intent(in) :: size !< The j-array size - integer, intent(out) :: js !< The computational domain starting j-index. - integer, intent(out) :: je !< The computational domain ending j-index. - logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes - !! can be considered. - ! Local variables - logical :: sym - character(len=120) :: mesg, mesg2 - integer :: isc, iec, jsc, jec, isd, ied, jsd, jed - - call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) - call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) - - jsc = jsc-jsd+1 ; jec = jec-jsd+1 ; jed = jed-jsd+1 ; jsd = 1 - sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric - - if (size == jed) then ; js = jsc ; je = jec - elseif (size == 1+jec-jsc) then ; js = 1 ; je = size - elseif (sym .and. (size == 1+jed)) then ; js = jsc ; je = jec+1 - elseif (sym .and. (size == 2+jec-jsc)) then ; js = 1 ; je = size+1 - else - write(mesg,'("Unrecognized size ", i6, "in call to get_simple_array_j_ind. 
\")') size - if (sym) then - write(mesg2,'("Valid sizes are : ", 2i7)') jed, 1+jec-jsc - else - write(mesg2,'("Valid sizes are : ", 4i7)') jed, 1+jec-jsc, 1+jed, 2+jec-jsc - endif - call MOM_error(FATAL, trim(mesg)//trim(mesg2)) - endif - -end subroutine get_simple_array_j_ind - -!> Returns the global shape of h-point arrays -subroutine get_global_shape(domain, niglobal, njglobal) - type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information - integer, intent(out) :: niglobal !< i-index global size of h-point arrays - integer, intent(out) :: njglobal !< j-index global size of h-point arrays - - niglobal = domain%niglobal - njglobal = domain%njglobal -end subroutine get_global_shape - -!> Returns arrays of the i- and j- sizes of the h-point computational domains for each -!! element of the grid layout. Any input values in the extent arrays are discarded, so -!! they are effectively intent out despite their declared intent of inout. -subroutine get_layout_extents(Domain, extent_i, extent_j) - type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information - integer, dimension(:), allocatable, intent(inout) :: extent_i !< The number of points in the - !! i-direction in each i-row of the layout - integer, dimension(:), allocatable, intent(inout) :: extent_j !< The number of points in the - !! 
j-direction in each j-row of the layout - - if (allocated(extent_i)) deallocate(extent_i) - if (allocated(extent_j)) deallocate(extent_j) - allocate(extent_i(domain%layout(1))) ; extent_i(:) = 0 - allocate(extent_j(domain%layout(2))) ; extent_j(:) = 0 - call mpp_get_domain_extents(domain%mpp_domain, extent_i, extent_j) -end subroutine get_layout_extents - end module MOM_domains diff --git a/src/framework/MOM_ensemble_manager.F90 b/src/framework/MOM_ensemble_manager.F90 new file mode 100644 index 0000000000..191dd79c9a --- /dev/null +++ b/src/framework/MOM_ensemble_manager.F90 @@ -0,0 +1,14 @@ +!> A simple (very thin) wrapper for managing ensemble member layout information +module MOM_ensemble_manager + +! This file is part of MOM6. See LICENSE.md for the license. + +use ensemble_manager_mod, only : get_ensemble_id, get_ensemble_size +use ensemble_manager_mod, only : get_ensemble_pelist, get_ensemble_filter_pelist + +implicit none ; private + +public get_ensemble_id, get_ensemble_size, get_ensemble_pelist, get_ensemble_filter_pelist + + +end module MOM_ensemble_manager diff --git a/src/framework/MOM_error_handler.F90 b/src/framework/MOM_error_handler.F90 index 30300d6e33..336a4942be 100644 --- a/src/framework/MOM_error_handler.F90 +++ b/src/framework/MOM_error_handler.F90 @@ -3,8 +3,8 @@ module MOM_error_handler ! This file is part of MOM6. See LICENSE.md for the license. -use mpp_mod, only : mpp_error, NOTE, WARNING, FATAL -use mpp_mod, only : mpp_pe, mpp_root_pe, stdlog, stdout +use MOM_error_infra, only : MOM_err, NOTE, WARNING, FATAL +use MOM_error_infra, only : is_root_pe, stdlog, stdout implicit none ; private @@ -39,15 +39,6 @@ module MOM_error_handler contains -!> This returns .true. if the current PE is the root PE. -function is_root_pe() - ! This returns .true. if the current PE is the root PE. - logical :: is_root_pe - is_root_pe = .false. - if (mpp_pe() == mpp_root_pe()) is_root_pe = .true. 
- return -end function is_root_pe - !> This provides a convenient interface for writing an informative comment. subroutine MOM_mesg(message, verb, all_print) character(len=*), intent(in) :: message !< A message to write out @@ -62,18 +53,18 @@ subroutine MOM_mesg(message, verb, all_print) if (present(all_print)) write_msg = write_msg .or. all_print verb_msg = 2 ; if (present(verb)) verb_msg = verb - if (write_msg .and. (verbosity >= verb_msg)) call mpp_error(NOTE, message) + if (write_msg .and. (verbosity >= verb_msg)) call MOM_err(NOTE, message) end subroutine MOM_mesg -!> This provides a convenient interface for writing an mpp_error message +!> This provides a convenient interface for writing an error message !! with run-time filter based on a verbosity. subroutine MOM_error(level, message, all_print) integer, intent(in) :: level !< The verbosity level of this message character(len=*), intent(in) :: message !< A message to write out logical, optional, intent(in) :: all_print !< If present and true, any PEs are !! able to write this message. - ! This provides a convenient interface for writing an mpp_error message + ! This provides a convenient interface for writing an error message ! with run-time filter based on a verbosity. 
logical :: write_msg @@ -82,13 +73,13 @@ subroutine MOM_error(level, message, all_print) select case (level) case (NOTE) - if (write_msg.and.verbosity>=2) call mpp_error(NOTE, message) + if (write_msg.and.verbosity>=2) call MOM_err(NOTE, message) case (WARNING) - if (write_msg.and.verbosity>=1) call mpp_error(WARNING, message) + if (write_msg.and.verbosity>=1) call MOM_err(WARNING, message) case (FATAL) - if (verbosity>=0) call mpp_error(FATAL, message) + if (verbosity>=0) call MOM_err(FATAL, message) case default - call mpp_error(level, message) + call MOM_err(level, message) end select end subroutine MOM_error @@ -137,10 +128,10 @@ subroutine callTree_enter(mesg,n) nAsString = '' if (present(n)) then write(nAsString(1:8),'(i8)') n - call mpp_error(NOTE, 'callTree: '// & + call MOM_err(NOTE, 'callTree: '// & repeat(' ',callTreeIndentLevel-1)//'loop '//trim(mesg)//trim(nAsString)) else - call mpp_error(NOTE, 'callTree: '// & + call MOM_err(NOTE, 'callTree: '// & repeat(' ',callTreeIndentLevel-1)//'---> '//trim(mesg)) endif endif @@ -152,7 +143,7 @@ subroutine callTree_leave(mesg) if (callTreeIndentLevel<1) write(0,*) 'callTree_leave: error callTreeIndentLevel=',callTreeIndentLevel,trim(mesg) callTreeIndentLevel = callTreeIndentLevel - 1 if (verbosity<6) return - if (is_root_pe()) call mpp_error(NOTE, 'callTree: '// & + if (is_root_pe()) call MOM_err(NOTE, 'callTree: '// & repeat(' ',callTreeIndentLevel)//'<--- '//trim(mesg)) end subroutine callTree_leave @@ -168,10 +159,10 @@ subroutine callTree_waypoint(mesg,n) nAsString = '' if (present(n)) then write(nAsString(1:8),'(i8)') n - call mpp_error(NOTE, 'callTree: '// & + call MOM_err(NOTE, 'callTree: '// & repeat(' ',callTreeIndentLevel)//'loop '//trim(mesg)//trim(nAsString)) else - call mpp_error(NOTE, 'callTree: '// & + call MOM_err(NOTE, 'callTree: '// & repeat(' ',callTreeIndentLevel)//'o '//trim(mesg)) endif endif diff --git a/src/framework/MOM_error_infra.F90 b/src/framework/MOM_error_infra.F90 new file mode 
100644 index 0000000000..21eb14ef3d --- /dev/null +++ b/src/framework/MOM_error_infra.F90 @@ -0,0 +1,25 @@ +!> Routines for error handling and I/O management +module MOM_error_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use mpp_mod, only : MOM_err => mpp_error, NOTE, WARNING, FATAL +use mpp_mod, only : mpp_pe, mpp_root_pe, stdlog, stdout + +implicit none ; private + +public MOM_err, NOTE, WARNING, FATAL, is_root_pe, stdlog, stdout + +contains + +! MOM_err writes an error message, and may stop the run depending on the +! severity of the error. + +!> is_root_pe returns .true. if the current PE is the root PE. +function is_root_pe() + logical :: is_root_pe + is_root_pe = .false. + if (mpp_pe() == mpp_root_pe()) is_root_pe = .true. +end function is_root_pe + +end module MOM_error_infra diff --git a/src/framework/MOM_horizontal_regridding.F90 b/src/framework/MOM_horizontal_regridding.F90 index b91509cc1d..d63e2d743a 100644 --- a/src/framework/MOM_horizontal_regridding.F90 +++ b/src/framework/MOM_horizontal_regridding.F90 @@ -11,9 +11,9 @@ module MOM_horizontal_regridding use MOM_error_handler, only : callTree_enter, callTree_leave, callTree_waypoint use MOM_file_parser, only : get_param, log_param, log_version, param_file_type use MOM_grid, only : ocean_grid_type -use MOM_interpolate, only : time_interp_extern, get_external_field_info, horiz_interp_init +use MOM_interpolate, only : time_interp_external, get_external_field_info, horiz_interp_init use MOM_interpolate, only : horiz_interp_new, horiz_interp, horiz_interp_type -use MOM_io_wrapper, only : axistype, get_axis_data +use MOM_io_infra, only : axistype, get_axis_data use MOM_time_manager, only : time_type use netcdf, only : NF90_OPEN, NF90_NOWRITE, NF90_GET_ATT, NF90_GET_VAR @@ -768,7 +768,7 @@ subroutine horiz_interp_and_extrap_tracer_fms_id(fms_id, Time, conversion, G, t if (.not.spongeDataOngrid) then if (is_root_pe()) & - call time_interp_extern(fms_id, Time, data_in, 
verbose=.true., turns=turns) + call time_interp_external(fms_id, Time, data_in, verbose=.true., turns=turns) ! Loop through each data level and interpolate to model grid. ! After interpolating, fill in points which will be needed to define the layers. do k=1,kd @@ -885,7 +885,7 @@ subroutine horiz_interp_and_extrap_tracer_fms_id(fms_id, Time, conversion, G, t enddo ! kd else - call time_interp_extern(fms_id, Time, data_in, verbose=.true., turns=turns) + call time_interp_external(fms_id, Time, data_in, verbose=.true., turns=turns) do k=1,kd do j=js,je do i=is,ie diff --git a/src/framework/MOM_interp_infra.F90 b/src/framework/MOM_interp_infra.F90 new file mode 100644 index 0000000000..d9de006224 --- /dev/null +++ b/src/framework/MOM_interp_infra.F90 @@ -0,0 +1,128 @@ +!> This module wraps the FMS temporal and spatial interpolation routines +module MOM_interp_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use MOM_domain_infra, only : MOM_domain_type, domain2d +use MOM_io_infra, only : axistype +use MOM_time_manager, only : time_type +use horiz_interp_mod, only : horiz_interp_new, horiz_interp, horiz_interp_init, horiz_interp_type +use time_interp_external_mod, only : time_interp_external +use time_interp_external_mod, only : init_external_field, time_interp_external_init +use time_interp_external_mod, only : get_external_field_size +use time_interp_external_mod, only : get_external_field_axes, get_external_field_missing + +implicit none ; private + +public :: time_interp_extern, init_extern_field, time_interp_external_init +public :: get_external_field_info +public :: horiz_interp_type, horiz_interp_init, horiz_interp, horiz_interp_new + +!> Read a field based on model time, and rotate to the model domain. 
+interface time_interp_extern + module procedure time_interp_extern_0d + module procedure time_interp_extern_2d + module procedure time_interp_extern_3d +end interface time_interp_extern + +contains + +!> Get information about the external fields. +subroutine get_external_field_info(field_id, size, axes, missing) + integer, intent(in) :: field_id !< The integer index of the external + !! field returned from a previous + !! call to init_external_field() + integer, dimension(4), optional, intent(inout) :: size !< Dimension sizes for the input data + type(axistype), dimension(4), optional, intent(inout) :: axes !< Axis types for the input data + real, optional, intent(inout) :: missing !< Missing value for the input data + + if (present(size)) then + size(1:4) = get_external_field_size(field_id) + endif + + if (present(axes)) then + axes(1:4) = get_external_field_axes(field_id) + endif + + if (present(missing)) then + missing = get_external_field_missing(field_id) + endif + +end subroutine get_external_field_info + + +!> Read a scalar field based on model time. +subroutine time_interp_extern_0d(field_id, time, data_in, verbose) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, intent(inout) :: data_in !< The interpolated value + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + + call time_interp_external(field_id, time, data_in, verbose=verbose) +end subroutine time_interp_extern_0d + +!> Read a 2d field from an external based on model time, potentially including horizontal +!! interpolation and rotation of the data +subroutine time_interp_extern_2d(field_id, time, data_in, interp, verbose, horz_interp, mask_out) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! 
from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, dimension(:,:), intent(inout) :: data_in !< The array in which to store the interpolated values + integer, optional, intent(in) :: interp !< A flag indicating the temporal interpolation method + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(horiz_interp_type), & + optional, intent(in) :: horz_interp !< A structure to control horizontal interpolation + logical, dimension(:,:), & + optional, intent(out) :: mask_out !< An array that is true where there is valid data + + call time_interp_external(field_id, time, data_in, interp=interp, verbose=verbose, & + horz_interp=horz_interp, mask_out=mask_out) +end subroutine time_interp_extern_2d + + +!> Read a 3d field based on model time, and rotate to the model grid +subroutine time_interp_extern_3d(field_id, time, data_in, interp, verbose, horz_interp, mask_out) + integer, intent(in) :: field_id !< The integer index of the external field returned + !! 
from a previous call to init_external_field() + type(time_type), intent(in) :: time !< The target time for the data + real, dimension(:,:,:), intent(inout) :: data_in !< The array in which to store the interpolated values + integer, optional, intent(in) :: interp !< A flag indicating the temporal interpolation method + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(horiz_interp_type), & + optional, intent(in) :: horz_interp !< A structure to control horizontal interpolation + logical, dimension(:,:,:), & + optional, intent(out) :: mask_out !< An array that is true where there is valid data + + call time_interp_external(field_id, time, data_in, interp=interp, verbose=verbose, & + horz_interp=horz_interp, mask_out=mask_out) +end subroutine time_interp_extern_3d + +integer function init_extern_field(file, fieldname, MOM_domain, domain, verbose, & + threading, ierr, ignore_axis_atts ) + + character(len=*), intent(in) :: file !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The name of the field in the file + integer, optional, intent(in) :: threading !< A flag specifying whether the root PE reads + !! the data and broadcasts it (SINGLE_FILE) or all + !! processors read (MULTIPLE, the default). + logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging + type(domain2d), optional, intent(in) :: domain !< A domain2d type that describes the decomposition + type(MOM_domain_type), & + optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition + integer, optional, intent(out) :: ierr !< Returns a non-zero error code in case of failure + logical, optional, intent(in) :: ignore_axis_atts !< If present and true, do not issue a + !! fatal error if the axis Cartesian attribute is + !! not set to a recognized value. 
+ + if (present(MOM_Domain)) then + init_extern_field = init_external_field(file, fieldname, domain=MOM_domain%mpp_domain, & + verbose=verbose, threading=threading, ierr=ierr, ignore_axis_atts=ignore_axis_atts) + else + init_extern_field = init_external_field(file, fieldname, domain=domain, & + verbose=verbose, threading=threading, ierr=ierr, ignore_axis_atts=ignore_axis_atts) + endif + +end function init_extern_field + +end module MOM_interp_infra diff --git a/src/framework/MOM_interpolate.F90 b/src/framework/MOM_interpolate.F90 index c63c847e55..6a5a4b2cc6 100644 --- a/src/framework/MOM_interpolate.F90 +++ b/src/framework/MOM_interpolate.F90 @@ -1,59 +1,30 @@ -!> This module wraps the FMS temporal and spatial interpolation routines +!> This module provides added functionality to the FMS temporal and spatial interpolation routines module MOM_interpolate ! This file is part of MOM6. See LICENSE.md for the license. use MOM_array_transform, only : allocate_rotated_array, rotate_array -use MOM_error_handler, only : MOM_error, FATAL -use MOM_io_wrapper, only : axistype -use horiz_interp_mod, only : horiz_interp_new, horiz_interp, horiz_interp_init, horiz_interp_type -use time_interp_external_mod, only : time_interp_external_fms=>time_interp_external -use time_interp_external_mod, only : init_external_field, time_interp_external_init -use time_interp_external_mod, only : get_external_field_size -use time_interp_external_mod, only : get_external_field_axes, get_external_field_missing -use time_manager_mod, only : time_type +use MOM_error_handler, only : MOM_error, FATAL +use MOM_interp_infra, only : time_interp_extern, init_external_field=>init_extern_field +use MOM_interp_infra, only : time_interp_external_init, get_external_field_info +use MOM_interp_infra, only : horiz_interp_type, horiz_interp_init, horiz_interp, horiz_interp_new +use MOM_io_infra, only : axistype +use MOM_time_manager, only : time_type implicit none ; private -public :: time_interp_extern, 
init_external_field, time_interp_external_init -public :: get_external_field_info +public :: time_interp_external, init_external_field, time_interp_external_init, get_external_field_info public :: horiz_interp_type, horiz_interp_init, horiz_interp, horiz_interp_new !> Read a field based on model time, and rotate to the model domain. -! This inerface does not share the name time_interp_external with the module it primarily -! wraps because of errors (perhaps a bug) that arise with the PGI 19.10.0 compiler. -interface time_interp_extern +interface time_interp_external module procedure time_interp_external_0d module procedure time_interp_external_2d module procedure time_interp_external_3d -end interface time_interp_extern +end interface time_interp_external contains -!> Get information about the external fields. -subroutine get_external_field_info(field_id, size, axes, missing) - integer, intent(in) :: field_id !< The integer index of the external - !! field returned from a previous - !! call to init_external_field() - integer, dimension(4), optional, intent(inout) :: size !< Dimension sizes for the input data - type(axistype), dimension(4), optional, intent(inout) :: axes !< Axis types for the input data - real, optional, intent(inout) :: missing !< Missing value for the input data - - if (present(size)) then - size(1:4) = get_external_field_size(field_id) - endif - - if (present(axes)) then - axes(1:4) = get_external_field_axes(field_id) - endif - - if (present(missing)) then - missing = get_external_field_missing(field_id) - endif - -end subroutine get_external_field_info - - !> Read a scalar field based on model time. 
subroutine time_interp_external_0d(field_id, time, data_in, verbose) integer, intent(in) :: field_id !< The integer index of the external field returned @@ -62,7 +33,7 @@ subroutine time_interp_external_0d(field_id, time, data_in, verbose) real, intent(inout) :: data_in !< The interpolated value logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging - call time_interp_external_fms(field_id, time, data_in, verbose=verbose) + call time_interp_extern(field_id, time, data_in, verbose=verbose) end subroutine time_interp_external_0d !> Read a 2d field from an external based on model time, potentially including horizontal @@ -87,16 +58,15 @@ subroutine time_interp_external_2d(field_id, time, data_in, interp, verbose, hor if (present(mask_out)) & call MOM_error(FATAL, "Rotation of masked output not yet support") - qturns = 0 - if (present(turns)) qturns = modulo(turns, 4) + qturns = 0 ; if (present(turns)) qturns = modulo(turns, 4) if (qturns == 0) then - call time_interp_external_fms(field_id, time, data_in, interp=interp, & - verbose=verbose, horz_interp=horz_interp) + call time_interp_extern(field_id, time, data_in, interp=interp, & + verbose=verbose, horz_interp=horz_interp) else call allocate_rotated_array(data_in, [1,1], -qturns, data_pre_rot) - call time_interp_external_fms(field_id, time, data_pre_rot, interp=interp, & - verbose=verbose, horz_interp=horz_interp) + call time_interp_extern(field_id, time, data_pre_rot, interp=interp, & + verbose=verbose, horz_interp=horz_interp) call rotate_array(data_pre_rot, turns, data_in) deallocate(data_pre_rot) endif @@ -125,16 +95,15 @@ subroutine time_interp_external_3d(field_id, time, data_in, interp, & if (present(mask_out)) & call MOM_error(FATAL, "Rotation of masked output not yet support") - qturns = 0 - if (present(turns)) qturns = modulo(turns, 4) + qturns = 0 ; if (present(turns)) qturns = modulo(turns, 4) if (qturns == 0) then - call time_interp_external_fms(field_id, time, data_in, 
interp=interp, & - verbose=verbose, horz_interp=horz_interp) + call time_interp_extern(field_id, time, data_in, interp=interp, & + verbose=verbose, horz_interp=horz_interp) else call allocate_rotated_array(data_in, [1,1,1], -qturns, data_pre_rot) - call time_interp_external_fms(field_id, time, data_pre_rot, interp=interp, & - verbose=verbose, horz_interp=horz_interp) + call time_interp_extern(field_id, time, data_pre_rot, interp=interp, & + verbose=verbose, horz_interp=horz_interp) call rotate_array(data_pre_rot, turns, data_in) deallocate(data_pre_rot) endif diff --git a/src/framework/MOM_io.F90 b/src/framework/MOM_io.F90 index c7d7e98e4b..2a547dbdd1 100644 --- a/src/framework/MOM_io.F90 +++ b/src/framework/MOM_io.F90 @@ -3,22 +3,23 @@ module MOM_io ! This file is part of MOM6. See LICENSE.md for the license. -use MOM_domains, only : MOM_domain_type, AGRID, BGRID_NE, CGRID_NE, get_domain_components -use MOM_domains, only : domain1D, get_simple_array_i_ind, get_simple_array_j_ind +use MOM_array_transform, only : allocate_rotated_array, rotate_array +use MOM_domains, only : MOM_domain_type, domain1D, get_domain_components +use MOM_domains, only : AGRID, BGRID_NE, CGRID_NE use MOM_dyn_horgrid, only : dyn_horgrid_type use MOM_error_handler, only : MOM_error, NOTE, FATAL, WARNING use MOM_file_parser, only : log_version, param_file_type use MOM_grid, only : ocean_grid_type -use MOM_io_wrapper, only : MOM_read_data, MOM_read_vector, MOM_write_field, read_axis_data -use MOM_io_wrapper, only : file_exists, field_exists, read_field_chksum -use MOM_io_wrapper, only : open_file, close_file, field_size, fieldtype, get_filename_appendix -use MOM_io_wrapper, only : flush_file, get_file_info, get_file_atts, get_file_fields -use MOM_io_wrapper, only : get_file_times, read_data, axistype, get_axis_data -use MOM_io_wrapper, only : write_field, write_metadata, write_version_number, get_ensemble_id -use MOM_io_wrapper, only : open_namelist_file, check_nml_error, io_infra_init, 
io_infra_end -use MOM_io_wrapper, only : APPEND_FILE, ASCII_FILE, MULTIPLE, NETCDF_FILE, OVERWRITE_FILE -use MOM_io_wrapper, only : READONLY_FILE, SINGLE_FILE, WRITEONLY_FILE -use MOM_io_wrapper, only : CENTER, CORNER, NORTH_FACE, EAST_FACE +use MOM_io_infra, only : MOM_read_data, read_data, MOM_read_vector, read_field_chksum +use MOM_io_infra, only : file_exists, get_file_info, get_file_atts, get_file_fields +use MOM_io_infra, only : open_file, close_file, field_size, fieldtype, field_exists +use MOM_io_infra, only : flush_file, get_filename_appendix, get_ensemble_id +use MOM_io_infra, only : get_file_times, axistype, get_axis_data +use MOM_io_infra, only : write_field, write_metadata, write_version_number +use MOM_io_infra, only : open_namelist_file, check_nml_error, io_infra_init, io_infra_end +use MOM_io_infra, only : APPEND_FILE, ASCII_FILE, MULTIPLE, NETCDF_FILE, OVERWRITE_FILE +use MOM_io_infra, only : READONLY_FILE, SINGLE_FILE, WRITEONLY_FILE +use MOM_io_infra, only : CENTER, CORNER, NORTH_FACE, EAST_FACE use MOM_string_functions, only : lowercase, slasher use MOM_verticalGrid, only : verticalGrid_type @@ -31,12 +32,12 @@ module MOM_io ! These interfaces are actually implemented in this file. public :: create_file, reopen_file, num_timelevels, cmor_long_std, ensembler, MOM_io_init -public :: var_desc, modify_vardesc, query_vardesc -! The following are simple pass throughs of routines from MOM_io_wrapper or other modules +public :: MOM_write_field, var_desc, modify_vardesc, query_vardesc +! 
The following are simple pass throughs of routines from MOM_io_infra or other modules public :: close_file, field_exists, field_size, fieldtype, get_filename_appendix public :: file_exists, flush_file, get_file_info, get_file_atts, get_file_fields -public :: get_file_times, open_file, read_axis_data, read_data, read_field_chksum -public :: MOM_read_data, MOM_read_vector, MOM_write_field, get_axis_data +public :: get_file_times, open_file, get_axis_data +public :: MOM_read_data, MOM_read_vector, read_data, read_field_chksum public :: slasher, write_field, write_version_number public :: open_namelist_file, check_nml_error, io_infra_init, io_infra_end ! These are encoding constants. @@ -44,7 +45,16 @@ module MOM_io public :: READONLY_FILE, SINGLE_FILE, WRITEONLY_FILE public :: CENTER, CORNER, NORTH_FACE, EAST_FACE -!> Type for describing a variable, typically a tracer +!> Write a registered field to an output file, potentially with rotation +interface MOM_write_field + module procedure MOM_write_field_4d + module procedure MOM_write_field_3d + module procedure MOM_write_field_2d + module procedure MOM_write_field_1d + module procedure MOM_write_field_0d +end interface MOM_write_field + +!> Type for describing a 3-d variable for output type, public :: vardesc character(len=64) :: name !< Variable name in a NetCDF file character(len=48) :: units !< Physical dimensions of the variable @@ -667,6 +677,118 @@ subroutine query_vardesc(vd, name, units, longname, hor_grid, z_grid, t_grid, & end subroutine query_vardesc + +!> Write a 4d field to an output file, potentially with rotation +subroutine MOM_write_field_4d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, & + fill_value, turns) + integer, intent(in) :: io_unit !< File I/O unit handle + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:,:,:), intent(inout) :: field !< 
Unrotated field to write + real, optional, intent(in) :: tstamp !< Model timestamp + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + integer, optional, intent(in) :: turns !< Number of quarter-turns to rotate the data + + real, allocatable :: field_rot(:,:,:,:) ! A rotated version of field, with the same units + integer :: qturns ! The number of quarter turns through which to rotate field + + qturns = 0 ; if (present(turns)) qturns = modulo(turns, 4) + + if (qturns == 0) then + call write_field(io_unit, field_md, MOM_domain, field, tstamp=tstamp, & + tile_count=tile_count, fill_value=fill_value) + else + call allocate_rotated_array(field, [1,1,1,1], qturns, field_rot) + call rotate_array(field, qturns, field_rot) + call write_field(io_unit, field_md, MOM_domain, field_rot, tstamp=tstamp, & + tile_count=tile_count, fill_value=fill_value) + deallocate(field_rot) + endif +end subroutine MOM_write_field_4d + +!> Write a 3d field to an output file, potentially with rotation +subroutine MOM_write_field_3d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, & + fill_value, turns) + integer, intent(in) :: io_unit !< File I/O unit handle + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:,:), intent(inout) :: field !< Unrotated field to write + real, optional, intent(in) :: tstamp !< Model timestamp + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + integer, optional, intent(in) :: turns !< Number of quarter-turns to rotate the data + + real, allocatable :: field_rot(:,:,:) ! A rotated version of field, with the same units + integer :: qturns ! 
The number of quarter turns through which to rotate field + + qturns = 0 ; if (present(turns)) qturns = modulo(turns, 4) + + if (qturns == 0) then + call write_field(io_unit, field_md, MOM_domain, field, tstamp=tstamp, & + tile_count=tile_count, fill_value=fill_value) + else + call allocate_rotated_array(field, [1,1,1], qturns, field_rot) + call rotate_array(field, qturns, field_rot) + call write_field(io_unit, field_md, MOM_domain, field_rot, tstamp=tstamp, & + tile_count=tile_count, fill_value=fill_value) + deallocate(field_rot) + endif +end subroutine MOM_write_field_3d + +!> Write a 2d field to an output file, potentially with rotation +subroutine MOM_write_field_2d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, & + fill_value, turns) + integer, intent(in) :: io_unit !< File I/O unit handle + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:), intent(inout) :: field !< Unrotated field to write + real, optional, intent(in) :: tstamp !< Model timestamp + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + integer, optional, intent(in) :: turns !< Number of quarter-turns to rotate the data + + real, allocatable :: field_rot(:,:) ! A rotated version of field, with the same units + integer :: qturns ! 
The number of quarter turns through which to rotate field + + qturns = 0 + if (present(turns)) qturns = modulo(turns, 4) + + if (qturns == 0) then + call write_field(io_unit, field_md, MOM_domain, field, tstamp=tstamp, & + tile_count=tile_count, fill_value=fill_value) + else + call allocate_rotated_array(field, [1,1], qturns, field_rot) + call rotate_array(field, qturns, field_rot) + call write_field(io_unit, field_md, MOM_domain, field_rot, tstamp=tstamp, & + tile_count=tile_count, fill_value=fill_value) + deallocate(field_rot) + endif +end subroutine MOM_write_field_2d + +!> Write a 1d field to an output file +subroutine MOM_write_field_1d(io_unit, field_md, field, tstamp, fill_value) + integer, intent(in) :: io_unit !< File I/O unit handle + type(fieldtype), intent(in) :: field_md !< Field type with metadata + real, dimension(:), intent(in) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model timestamp + real, optional, intent(in) :: fill_value !< Missing data fill value + + call write_field(io_unit, field_md, field, tstamp=tstamp) +end subroutine MOM_write_field_1d + +!> Write a 0d field to an output file +subroutine MOM_write_field_0d(io_unit, field_md, field, tstamp, fill_value) + integer, intent(in) :: io_unit !< File I/O unit handle + type(fieldtype), intent(in) :: field_md !< Field type with metadata + real, intent(in) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model timestamp + real, optional, intent(in) :: fill_value !< Missing data fill value + + call write_field(io_unit, field_md, field, tstamp=tstamp) +end subroutine MOM_write_field_0d + + !> Copies a string subroutine safe_string_copy(str1, str2, fieldnm, caller) character(len=*), intent(in) :: str1 !< The string being copied diff --git a/src/framework/MOM_io_wrapper.F90 b/src/framework/MOM_io_infra.F90 similarity index 77% rename from src/framework/MOM_io_wrapper.F90 rename to src/framework/MOM_io_infra.F90 index 7437b59db1..a854cd6d2a 100644 --- 
a/src/framework/MOM_io_wrapper.F90 +++ b/src/framework/MOM_io_infra.F90 @@ -1,12 +1,11 @@ !> This module contains a thin inteface to mpp and fms I/O code -module MOM_io_wrapper +module MOM_io_infra ! This file is part of MOM6. See LICENSE.md for the license. -use MOM_array_transform, only : allocate_rotated_array, rotate_array -use MOM_domains, only : MOM_domain_type, AGRID, BGRID_NE, CGRID_NE -use MOM_domains, only : get_simple_array_i_ind, get_simple_array_j_ind -use MOM_error_handler, only : MOM_error, NOTE, FATAL, WARNING +use MOM_domain_infra, only : MOM_domain_type, AGRID, BGRID_NE, CGRID_NE +use MOM_domain_infra, only : get_simple_array_i_ind, get_simple_array_j_ind +use MOM_error_infra, only : MOM_error=>MOM_err, NOTE, FATAL, WARNING use ensemble_manager_mod, only : get_ensemble_id use fms_mod, only : write_version_number, open_namelist_file, check_nml_error @@ -14,7 +13,7 @@ module MOM_io_wrapper use fms_io_mod, only : io_infra_end=>fms_io_exit, get_filename_appendix use mpp_domains_mod, only : domain2d, CENTER, CORNER, NORTH_FACE=>NORTH, EAST_FACE=>EAST use mpp_io_mod, only : mpp_open, close_file=>mpp_close -use mpp_io_mod, only : write_metadata=>mpp_write_meta, write_field=>mpp_write +use mpp_io_mod, only : write_metadata=>mpp_write_meta, mpp_write use mpp_io_mod, only : get_field_atts=>mpp_get_atts, mpp_attribute_exist use mpp_io_mod, only : mpp_get_axes, axistype, get_axis_data=>mpp_get_axis_data use mpp_io_mod, only : mpp_get_fields, fieldtype, flush_file=>mpp_flush @@ -28,14 +27,15 @@ module MOM_io_wrapper implicit none ; private -! These interfaces are actually implemented in this file. -public :: MOM_read_data, MOM_read_vector, MOM_write_field, read_axis_data +! These interfaces are actually implemented or have explicit interfaces in this file. +public :: MOM_read_data, MOM_read_vector, write_field, open_file public :: file_exists, field_exists, read_field_chksum -! The following are simple pass throughs of routines from other modules. 
-public :: open_file, close_file, field_size, fieldtype, get_filename_appendix +! The following are simple pass throughs of routines from other modules. They need +! to have explicit interfaces added to this file. +public :: close_file, field_size, fieldtype, get_filename_appendix public :: flush_file, get_file_info, get_file_atts, get_file_fields, get_field_atts public :: get_file_times, read_data, axistype, get_axis_data -public :: write_field, write_metadata, write_version_number, get_ensemble_id +public :: write_metadata, write_version_number, get_ensemble_id public :: open_namelist_file, check_nml_error, io_infra_init, io_infra_end ! These are encoding constants. public :: APPEND_FILE, ASCII_FILE, MULTIPLE, NETCDF_FILE, OVERWRITE_FILE @@ -58,13 +58,14 @@ module MOM_io_wrapper end interface !> Write a registered field to an output file -interface MOM_write_field - module procedure MOM_write_field_4d - module procedure MOM_write_field_3d - module procedure MOM_write_field_2d - module procedure MOM_write_field_1d - module procedure MOM_write_field_0d -end interface MOM_write_field +interface write_field + module procedure write_field_4d + module procedure write_field_3d + module procedure write_field_2d + module procedure write_field_1d + module procedure write_field_0d + module procedure MOM_write_axis +end interface write_field !> Read a pair of data fields representing the two components of a vector from a file interface MOM_read_vector @@ -74,46 +75,8 @@ module MOM_io_wrapper contains -!> Read the data associated with a named axis in a file -subroutine read_axis_data(filename, axis_name, var) - character(len=*), intent(in) :: filename !< Name of the file to read - character(len=*), intent(in) :: axis_name !< Name of the axis to read - real, dimension(:), intent(out) :: var !< The axis location data - - integer :: i, len, unit, ndim, nvar, natt, ntime - logical :: axis_found - type(axistype), allocatable :: axes(:) - type(axistype) :: time_axis - 
character(len=32) :: name, units - - call open_file(unit, trim(filename), action=READONLY_FILE, form=NETCDF_FILE, & - threading=MULTIPLE, fileset=SINGLE_FILE) - -!Find the number of variables (nvar) in this file - call get_file_info(unit, ndim, nvar, natt, ntime) -! ------------------------------------------------------------------- -! Allocate space for the number of axes in the data file. -! ------------------------------------------------------------------- - allocate(axes(ndim)) - call mpp_get_axes(unit, axes, time_axis) - - axis_found = .false. - do i = 1, ndim - call get_file_atts(axes(i), name=name, len=len, units=units) - if (name == axis_name) then - axis_found = .true. - call get_axis_data(axes(i), var) - exit - endif - enddo - - if (.not.axis_found) call MOM_error(FATAL, "MOM_io read_axis_data: "//& - "Unable to find axis "//trim(axis_name)//" in file "//trim(filename)) - - deallocate(axes) - -end subroutine read_axis_data - +!> Reads the checksum value for a field that was recorded in a file, along with a flag indicating +!! whether the file contained a valid checksum for this field. subroutine read_field_chksum(field, chksum, valid_chksum) type(fieldtype), intent(in) :: field !< The field whose checksum attribute is to be read. integer(kind=8), intent(out) :: chksum !< The checksum for the field. @@ -131,7 +94,6 @@ subroutine read_field_chksum(field, chksum, valid_chksum) endif end subroutine read_field_chksum - !> Returns true if the named file or its domain-decomposed variant exists. 
function MOM_file_exists(filename, MOM_Domain) character(len=*), intent(in) :: filename !< The name of the file being inquired about @@ -417,116 +379,74 @@ subroutine MOM_read_vector_3d(filename, u_fieldname, v_fieldname, u_data, v_data end subroutine MOM_read_vector_3d -!> Write a 4d field to an output file, potentially with rotation -subroutine MOM_write_field_4d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, & - fill_value, turns) +!> Write a 4d field to an output file. +subroutine write_field_4d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, fill_value) integer, intent(in) :: io_unit !< File I/O unit handle type(fieldtype), intent(in) :: field_md !< Field type with metadata type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition - real, dimension(:,:,:,:), intent(inout) :: field !< Unrotated field to write + real, dimension(:,:,:,:), intent(inout) :: field !< Field to write real, optional, intent(in) :: tstamp !< Model timestamp integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) real, optional, intent(in) :: fill_value !< Missing data fill value - integer, optional, intent(in) :: turns !< Number of quarter-turns to rotate the data - - real, allocatable :: field_rot(:,:,:,:) ! A rotated version of field, with the same units - integer :: qturns ! 
The number of quarter turns through which to rotate field - qturns = 0 - if (present(turns)) qturns = modulo(turns, 4) - - if (qturns == 0) then - call write_field(io_unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & - tile_count=tile_count, default_data=fill_value) - else - call allocate_rotated_array(field, [1,1,1,1], qturns, field_rot) - call rotate_array(field, qturns, field_rot) - call write_field(io_unit, field_md, MOM_domain%mpp_domain, field_rot, tstamp=tstamp, & - tile_count=tile_count, default_data=fill_value) - deallocate(field_rot) - endif -end subroutine MOM_write_field_4d + call mpp_write(io_unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) +end subroutine write_field_4d -!> Write a 3d field to an output file, potentially with rotation -subroutine MOM_write_field_3d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, & - fill_value, turns) +!> Write a 3d field to an output file. +subroutine write_field_3d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, fill_value) integer, intent(in) :: io_unit !< File I/O unit handle type(fieldtype), intent(in) :: field_md !< Field type with metadata type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition - real, dimension(:,:,:), intent(inout) :: field !< Unrotated field to write + real, dimension(:,:,:), intent(inout) :: field !< Field to write real, optional, intent(in) :: tstamp !< Model timestamp integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) real, optional, intent(in) :: fill_value !< Missing data fill value - integer, optional, intent(in) :: turns !< Number of quarter-turns to rotate the data - - real, allocatable :: field_rot(:,:,:) ! A rotated version of field, with the same units - integer :: qturns ! 
The number of quarter turns through which to rotate field - - qturns = 0 - if (present(turns)) qturns = modulo(turns, 4) - if (qturns == 0) then - call write_field(io_unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & - tile_count=tile_count, default_data=fill_value) - else - call allocate_rotated_array(field, [1,1,1], qturns, field_rot) - call rotate_array(field, qturns, field_rot) - call write_field(io_unit, field_md, MOM_domain%mpp_domain, field_rot, tstamp=tstamp, & - tile_count=tile_count, default_data=fill_value) - deallocate(field_rot) - endif -end subroutine MOM_write_field_3d + call mpp_write(io_unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) +end subroutine write_field_3d -!> Write a 2d field to an output file, potentially with rotation -subroutine MOM_write_field_2d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, & - fill_value, turns) +!> Write a 2d field to an output file. +subroutine write_field_2d(io_unit, field_md, MOM_domain, field, tstamp, tile_count, fill_value) integer, intent(in) :: io_unit !< File I/O unit handle type(fieldtype), intent(in) :: field_md !< Field type with metadata type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition - real, dimension(:,:), intent(inout) :: field !< Unrotated field to write + real, dimension(:,:), intent(inout) :: field !< Field to write real, optional, intent(in) :: tstamp !< Model timestamp integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) real, optional, intent(in) :: fill_value !< Missing data fill value - integer, optional, intent(in) :: turns !< Number of quarter-turns to rotate the data - real, allocatable :: field_rot(:,:) ! A rotated version of field, with the same units - integer :: qturns ! 
The number of quarter turns through which to rotate field + call mpp_write(io_unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) +end subroutine write_field_2d - qturns = 0 - if (present(turns)) qturns = modulo(turns, 4) - - if (qturns == 0) then - call write_field(io_unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & - tile_count=tile_count, default_data=fill_value) - else - call allocate_rotated_array(field, [1,1], qturns, field_rot) - call rotate_array(field, qturns, field_rot) - call write_field(io_unit, field_md, MOM_domain%mpp_domain, field_rot, tstamp=tstamp, & - tile_count=tile_count, default_data=fill_value) - deallocate(field_rot) - endif -end subroutine MOM_write_field_2d - -!> Write a 1d field to an output file -subroutine MOM_write_field_1d(io_unit, field_md, field, tstamp, fill_value) +!> Write a 1d field to an output file. +subroutine write_field_1d(io_unit, field_md, field, tstamp) integer, intent(in) :: io_unit !< File I/O unit handle type(fieldtype), intent(in) :: field_md !< Field type with metadata - real, dimension(:), intent(inout) :: field !< Field to write + real, dimension(:), intent(in) :: field !< Field to write real, optional, intent(in) :: tstamp !< Model timestamp - real, optional, intent(in) :: fill_value !< Missing data fill value - call write_field(io_unit, field_md, field, tstamp=tstamp) -end subroutine MOM_write_field_1d + call mpp_write(io_unit, field_md, field, tstamp=tstamp) +end subroutine write_field_1d -!> Write a 0d field to an output file -subroutine MOM_write_field_0d(io_unit, field_md, field, tstamp, fill_value) +!> Write a 0d field to an output file. 
+subroutine write_field_0d(io_unit, field_md, field, tstamp) integer, intent(in) :: io_unit !< File I/O unit handle type(fieldtype), intent(in) :: field_md !< Field type with metadata - real, intent(inout) :: field !< Field to write + real, intent(in) :: field !< Field to write real, optional, intent(in) :: tstamp !< Model timestamp - real, optional, intent(in) :: fill_value !< Missing data fill value - call write_field(io_unit, field_md, field, tstamp=tstamp) -end subroutine MOM_write_field_0d + call mpp_write(io_unit, field_md, field, tstamp=tstamp) +end subroutine write_field_0d + +subroutine MOM_write_axis(io_unit, axis) + integer, intent(in) :: io_unit !< File I/O unit handle + type(axistype), intent(in) :: axis !< An axis type variable with information to write + + call mpp_write(io_unit, axis) + +end subroutine MOM_write_axis -end module MOM_io_wrapper +end module MOM_io_infra diff --git a/src/framework/MOM_time_manager.F90 b/src/framework/MOM_time_manager.F90 index 229c3ded3a..0f8ced0928 100644 --- a/src/framework/MOM_time_manager.F90 +++ b/src/framework/MOM_time_manager.F90 @@ -14,9 +14,6 @@ module MOM_time_manager use time_manager_mod, only : set_calendar_type, get_calendar_type use time_manager_mod, only : JULIAN, NOLEAP, THIRTY_DAY_MONTHS, GREGORIAN use time_manager_mod, only : NO_CALENDAR -use time_interp_external_mod, only : init_external_field, time_interp_external, time_interp_external_init -use time_interp_external_mod, only : get_external_field_size -use time_interp_external_mod, only : get_external_field_axes, get_external_field_missing implicit none ; private @@ -29,12 +26,6 @@ module MOM_time_manager public :: get_date, set_date, increment_date, month_name, days_in_month public :: JULIAN, NOLEAP, THIRTY_DAY_MONTHS, GREGORIAN, NO_CALENDAR public :: set_calendar_type, get_calendar_type -public :: init_external_field -public :: time_interp_external -public :: time_interp_external_init -public :: get_external_field_size -public :: 
get_external_field_axes -public :: get_external_field_missing contains @@ -60,5 +51,4 @@ function real_to_time(x, err_msg) real_to_time = set_time(seconds=seconds, days=days, ticks=ticks, err_msg=err_msg) end function real_to_time - end module MOM_time_manager diff --git a/src/ice_shelf/MOM_ice_shelf.F90 b/src/ice_shelf/MOM_ice_shelf.F90 index aadbf1ace0..4149e1be01 100644 --- a/src/ice_shelf/MOM_ice_shelf.F90 +++ b/src/ice_shelf/MOM_ice_shelf.F90 @@ -15,8 +15,8 @@ module MOM_ice_shelf use MOM_IS_diag_mediator, only : diag_mediator_init, diag_mediator_end, set_diag_mediator_grid use MOM_IS_diag_mediator, only : enable_averages, enable_averaging, disable_averaging use MOM_IS_diag_mediator, only : diag_mediator_infrastructure_init, diag_mediator_close_registration -use MOM_domain_init, only : MOM_domains_init -use MOM_domains, only : clone_MOM_domain, pass_var, pass_vector, TO_ALL, CGRID_NE, BGRID_NE, CORNER +use MOM_domains, only : MOM_domains_init, pass_var, pass_vector, clone_MOM_domain +use MOM_domains, only : TO_ALL, CGRID_NE, BGRID_NE, CORNER use MOM_dyn_horgrid, only : dyn_horgrid_type, create_dyn_horgrid, destroy_dyn_horgrid use MOM_dyn_horgrid, only : rescale_dyn_horgrid_bathymetry use MOM_error_handler, only : MOM_error, MOM_mesg, FATAL, WARNING, is_root_pe @@ -57,7 +57,7 @@ module MOM_ice_shelf use MOM_coms, only : reproducing_sum use MOM_spatial_means, only : global_area_integral use MOM_checksums, only : hchksum, qchksum, chksum, uchksum, vchksum, uvchksum -use MOM_interpolate, only : init_external_field, time_interp_extern, time_interp_external_init +use MOM_interpolate, only : init_external_field, time_interp_external, time_interp_external_init implicit none ; private @@ -1084,7 +1084,7 @@ subroutine add_shelf_flux(G, US, CS, sfc_state, fluxes) do j=js,je ; do i=is,ie last_hmask(i,j) = ISS%hmask(i,j) ; last_area_shelf_h(i,j) = ISS%area_shelf_h(i,j) enddo ; enddo - call time_interp_extern(CS%id_read_mass, Time0, last_mass_shelf) + call 
time_interp_external(CS%id_read_mass, Time0, last_mass_shelf) do j=js,je ; do i=is,ie ! This should only be done if time_interp_extern did an update. last_mass_shelf(i,j) = US%kg_m3_to_R*US%m_to_Z * last_mass_shelf(i,j) ! Rescale after time_interp @@ -1933,15 +1933,15 @@ subroutine initialize_shelf_mass(G, param_file, CS, ISS, new_sim) call log_param(param_file, mdl, "INPUTDIR/SHELF_FILE", filename) CS%id_read_mass = init_external_field(filename, shelf_mass_var, & - domain=CS%Grid_in%Domain%mpp_domain, verbose=CS%debug) + MOM_domain=CS%Grid_in%Domain, verbose=CS%debug) if (read_shelf_area) then call get_param(param_file, mdl, "SHELF_AREA_VAR", shelf_area_var, & "The variable in SHELF_FILE with the shelf area.", & default="shelf_area") - CS%id_read_area = init_external_field(filename,shelf_area_var, & - domain=CS%Grid_in%Domain%mpp_domain) + CS%id_read_area = init_external_field(filename, shelf_area_var, & + MOM_domain=CS%Grid_in%Domain) endif if (.not.file_exists(filename, CS%Grid_in%Domain)) call MOM_error(FATAL, & @@ -1984,7 +1984,7 @@ subroutine update_shelf_mass(G, US, CS, ISS, Time) allocate(tmp2d(is:ie,js:je)) ; tmp2d(:,:) = 0.0 endif - call time_interp_extern(CS%id_read_mass, Time, tmp2d) + call time_interp_external(CS%id_read_mass, Time, tmp2d) call rotate_array(tmp2d, CS%turns, ISS%mass_shelf) deallocate(tmp2d) diff --git a/src/ice_shelf/MOM_ice_shelf_diag_mediator.F90 b/src/ice_shelf/MOM_ice_shelf_diag_mediator.F90 index 397696c0ba..4955dd291a 100644 --- a/src/ice_shelf/MOM_ice_shelf_diag_mediator.F90 +++ b/src/ice_shelf/MOM_ice_shelf_diag_mediator.F90 @@ -151,27 +151,21 @@ subroutine set_axes_info(G, param_file, diag_cs, axes_set_name) if (G%symmetric) then id_xq = diag_axis_init('xB', G%gridLonB(G%isgB:G%iegB), G%x_axis_units, 'x', & - 'Boundary point nominal longitude',set_name=set_name, & - Domain2=G%Domain%mpp_domain, domain_position=EAST) + 'Boundary point nominal longitude', G%Domain, position=EAST, set_name=set_name) id_yq = diag_axis_init('yB', 
G%gridLatB(G%jsgB:G%jegB), G%y_axis_units, 'y', & - 'Boundary point nominal latitude', set_name=set_name, & - Domain2=G%Domain%mpp_domain, domain_position=NORTH) + 'Boundary point nominal latitude', G%Domain, position=NORTH, set_name=set_name) else id_xq = diag_axis_init('xB', G%gridLonB(G%isg:G%ieg), G%x_axis_units, 'x', & - 'Boundary point nominal longitude',set_name=set_name, & - Domain2=G%Domain%mpp_domain, domain_position=EAST) + 'Boundary point nominal longitude', G%Domain, position=EAST, set_name=set_name) id_yq = diag_axis_init('yB', G%gridLatB(G%jsg:G%jeg), G%y_axis_units, 'y', & - 'Boundary point nominal latitude', set_name=set_name, & - Domain2=G%Domain%mpp_domain, domain_position=NORTH) + 'Boundary point nominal latitude', G%Domain, position=NORTH, set_name=set_name) endif id_xh = diag_axis_init('xT', G%gridLonT(G%isg:G%ieg), G%x_axis_units, 'x', & - 'T point nominal longitude', set_name=set_name, & - Domain2=G%Domain%mpp_domain) + 'T point nominal longitude', G%Domain, set_name=set_name) id_yh = diag_axis_init('yT', G%gridLatT(G%jsg:G%jeg), G%y_axis_units, 'y', & - 'T point nominal latitude', set_name=set_name, & - Domain2=G%Domain%mpp_domain) + 'T point nominal latitude', G%Domain, set_name=set_name) ! Axis groupings for 2-D arrays. call defineAxes(diag_cs, [id_xh, id_yh], diag_cs%axesT1) diff --git a/src/ice_shelf/user_shelf_init.F90 b/src/ice_shelf/user_shelf_init.F90 index 122758f3cc..9635f51262 100644 --- a/src/ice_shelf/user_shelf_init.F90 +++ b/src/ice_shelf/user_shelf_init.F90 @@ -4,15 +4,12 @@ module user_shelf_init ! This file is part of MOM6. See LICENSE.md for the license. -! use MOM_domains, only : sum_across_PEs use MOM_error_handler, only : MOM_mesg, MOM_error, FATAL, is_root_pe -use MOM_file_parser, only : get_param, log_version, param_file_type -use MOM_grid, only : ocean_grid_type -use MOM_time_manager, only : time_type, set_time, time_type_to_real -use MOM_unit_scaling, only : unit_scale_type -! 
use MOM_io, only : close_file, fieldtype, file_exists -! use MOM_io, only : open_file, read_data, read_axis_data, SINGLE_FILE -! use MOM_io, only : write_field, slasher +use MOM_file_parser, only : get_param, log_version, param_file_type +use MOM_grid, only : ocean_grid_type +use MOM_time_manager, only : time_type, set_time, time_type_to_real +use MOM_unit_scaling, only : unit_scale_type + implicit none ; private #include diff --git a/src/initialization/MOM_coord_initialization.F90 b/src/initialization/MOM_coord_initialization.F90 index c1ec788836..23d279b65a 100644 --- a/src/initialization/MOM_coord_initialization.F90 +++ b/src/initialization/MOM_coord_initialization.F90 @@ -3,21 +3,19 @@ module MOM_coord_initialization ! This file is part of MOM6. See LICENSE.md for the license. -use MOM_debugging, only : chksum -use MOM_EOS, only : calculate_density, EOS_type -use MOM_error_handler, only : MOM_mesg, MOM_error, FATAL, WARNING, is_root_pe -use MOM_error_handler, only : callTree_enter, callTree_leave, callTree_waypoint -use MOM_file_parser, only : get_param, read_param, log_param, param_file_type -use MOM_file_parser, only : log_version -use MOM_io, only : close_file, create_file, fieldtype, file_exists -use MOM_io, only : open_file, MOM_read_data, read_axis_data, SINGLE_FILE, MULTIPLE -use MOM_io, only : slasher, vardesc, write_field, var_desc -use MOM_string_functions, only : uppercase -use MOM_unit_scaling, only : unit_scale_type -use MOM_variables, only : thermo_var_ptrs -use MOM_verticalGrid, only : verticalGrid_type, setVerticalGridAxes -use user_initialization, only : user_set_coord -use BFB_initialization, only : BFB_set_coord +use MOM_debugging, only : chksum +use MOM_EOS, only : calculate_density, EOS_type +use MOM_error_handler, only : MOM_mesg, MOM_error, FATAL, WARNING, is_root_pe +use MOM_error_handler, only : callTree_enter, callTree_leave, callTree_waypoint +use MOM_file_parser, only : get_param, read_param, log_param, param_file_type, log_version 
+use MOM_io, only : MOM_read_data, close_file, create_file, fieldtype, file_exists +use MOM_io, only : write_field, vardesc, var_desc, SINGLE_FILE, MULTIPLE +use MOM_string_functions, only : slasher, uppercase +use MOM_unit_scaling, only : unit_scale_type +use MOM_variables, only : thermo_var_ptrs +use MOM_verticalGrid, only : verticalGrid_type, setVerticalGridAxes +use user_initialization, only : user_set_coord +use BFB_initialization, only : BFB_set_coord use netcdf @@ -286,8 +284,8 @@ subroutine set_coord_from_TS_profile(Rlay, g_prime, GV, US, param_file, eqn_of_s filename = trim(slasher(inputdir))//trim(coord_file) call log_param(param_file, mdl, "INPUTDIR/COORD_FILE", filename) - call MOM_read_data(filename,"PTEMP",T0(:)) - call MOM_read_data(filename,"SALT",S0(:)) + call MOM_read_data(filename, "PTEMP", T0(:)) + call MOM_read_data(filename, "SALT", S0(:)) if (.not.file_exists(filename)) call MOM_error(FATAL, & " set_coord_from_TS_profile: Unable to open " //trim(filename)) @@ -420,7 +418,7 @@ subroutine set_coord_from_file(Rlay, g_prime, GV, US, param_file) if (.not.file_exists(filename)) call MOM_error(FATAL, & " set_coord_from_file: Unable to open "//trim(filename)) - call read_axis_data(filename, coord_var, Rlay) + call MOM_read_data(filename, coord_var, Rlay) do k=1,nz ; Rlay(k) = US%kg_m3_to_R*Rlay(k) ; enddo g_prime(1) = g_fs do k=2,nz ; g_prime(k) = (GV%g_Earth/(GV%Rho0)) * (Rlay(k) - Rlay(k-1)) ; enddo diff --git a/src/initialization/MOM_grid_initialize.F90 b/src/initialization/MOM_grid_initialize.F90 index eee168eefb..b5685745ac 100644 --- a/src/initialization/MOM_grid_initialize.F90 +++ b/src/initialization/MOM_grid_initialize.F90 @@ -7,8 +7,7 @@ module MOM_grid_initialize use MOM_domains, only : pass_var, pass_vector, pe_here, root_PE, broadcast use MOM_domains, only : AGRID, BGRID_NE, CGRID_NE, To_All, Scalar_Pair use MOM_domains, only : To_North, To_South, To_East, To_West -use MOM_domains, only : MOM_define_domain, MOM_define_IO_domain, 
get_layout_extents -use MOM_domains, only : MOM_domain_type, deallocate_domain_contents +use MOM_domains, only : MOM_domain_type, clone_MOM_domain, deallocate_MOM_domain use MOM_dyn_horgrid, only : dyn_horgrid_type, set_derived_dyn_horgrid use MOM_error_handler, only : MOM_error, MOM_mesg, FATAL, is_root_pe use MOM_error_handler, only : callTree_enter, callTree_leave @@ -185,13 +184,10 @@ subroutine set_grid_metrics_from_mosaic(G, param_file, US) real :: m_to_L ! A unit conversion factor [L m-1 ~> nondim] character(len=200) :: filename, grid_file, inputdir character(len=64) :: mdl = "MOM_grid_init set_grid_metrics_from_mosaic" - integer :: err=0, ni, nj, global_indices(4) - type(MOM_domain_type) :: SGdom ! Supergrid domain + type(MOM_domain_type), pointer :: SGdom => NULL() ! Supergrid domain logical :: lon_bug ! If true use an older buggy answer in the tripolar longitude. - integer :: i, j, i2, j2 - integer, dimension(:), allocatable :: exni ! The extents of the grid for each i-row of the layout - integer, dimension(:), allocatable :: exnj ! The extents of the grid for each j-row of the layout - integer :: start(4), nread(4) + integer :: i, j, i2, j2, ni, nj + integer :: start(4), nread(4) call callTree_enter("set_grid_metrics_from_mosaic(), MOM_grid_initialize.F90") @@ -217,39 +213,9 @@ subroutine set_grid_metrics_from_mosaic(G, param_file, US) dxBu(:,:) = 0.0 ; dyBu(:,:) = 0.0 ; areaBu(:,:) = 0.0 ! - ni = 2*(G%iec-G%isc+1) ! i size of supergrid - nj = 2*(G%jec-G%jsc+1) ! j size of supergrid - - ! 
Define a domain for the supergrid (SGdom) - call get_layout_extents(G%domain, exni, exnj) - allocate(SGdom%mpp_domain) - SGdom%nihalo = 2*G%domain%nihalo+1 - SGdom%njhalo = 2*G%domain%njhalo+1 - SGdom%niglobal = 2*G%domain%niglobal - SGdom%njglobal = 2*G%domain%njglobal - SGdom%layout(:) = G%domain%layout(:) - SGdom%io_layout(:) = G%domain%io_layout(:) - global_indices(1) = 1+SGdom%nihalo - global_indices(2) = SGdom%niglobal+SGdom%nihalo - global_indices(3) = 1+SGdom%njhalo - global_indices(4) = SGdom%njglobal+SGdom%njhalo - exni(:) = 2*exni(:) ; exnj(:) = 2*exnj(:) - if (associated(G%domain%maskmap)) then - call MOM_define_domain(global_indices, SGdom%layout, SGdom%mpp_domain, & - xflags=G%domain%X_FLAGS, yflags=G%domain%Y_FLAGS, & - xhalo=SGdom%nihalo, yhalo=SGdom%njhalo, & - xextent=exni, yextent=exnj, & - symmetry=.true., name="MOM_MOSAIC", maskmap=G%domain%maskmap) - else - call MOM_define_domain(global_indices, SGdom%layout, SGdom%mpp_domain, & - xflags=G%domain%X_FLAGS, yflags=G%domain%Y_FLAGS, & - xhalo=SGdom%nihalo, yhalo=SGdom%njhalo, & - xextent=exni, yextent=exnj, & - symmetry=.true., name="MOM_MOSAIC") - endif - call MOM_define_IO_domain(SGdom%mpp_domain, SGdom%io_layout) - deallocate(exni, exnj) + call clone_MOM_domain(G%domain, SGdom, symmetric=.true., domain_name="MOM_MOSAIC", & + refine=2, extra_halo=1) ! Read X from the supergrid tmpZ(:,:) = 999. 
@@ -338,9 +304,9 @@ subroutine set_grid_metrics_from_mosaic(G, param_file, US) (tmpT(i2,j2+1) + tmpT(i2+1,j2)) enddo ; enddo - ni=SGdom%niglobal - nj=SGdom%njglobal - call deallocate_domain_contents(SGdom) + ni = SGdom%niglobal + nj = SGdom%njglobal + call deallocate_MOM_domain(SGdom) call pass_vector(dyCu, dxCv, G%Domain, To_All+Scalar_Pair, CGRID_NE) call pass_vector(dxCu, dyCv, G%Domain, To_All+Scalar_Pair, CGRID_NE) diff --git a/src/ocean_data_assim/MOM_oda_driver.F90 b/src/ocean_data_assim/MOM_oda_driver.F90 index 670be5d3fb..8057234cdc 100644 --- a/src/ocean_data_assim/MOM_oda_driver.F90 +++ b/src/ocean_data_assim/MOM_oda_driver.F90 @@ -1,29 +1,28 @@ !> Interfaces for MOM6 ensembles and data assimilation. module MOM_oda_driver_mod - ! This file is part of MOM6. see LICENSE.md for the license. - -use mpp_mod, only : stdout, stdlog, mpp_error, npes=>mpp_npes,pe=>mpp_pe -use mpp_mod, only : set_current_pelist => mpp_set_current_pelist -use mpp_mod, only : set_root_pe => mpp_set_root_pe -use mpp_mod, only : mpp_sync_self, mpp_sum, get_pelist=>mpp_get_current_pelist, mpp_root_pe -use mpp_mod, only : set_stack_size=>mpp_set_stack_size, broadcast=>mpp_broadcast -use mpp_io_mod, only : io_set_stack_size=>mpp_io_set_stack_size -use mpp_io_mod, only : MPP_SINGLE,MPP_MULTI -use mpp_domains_mod, only : domain2d, mpp_global_field -use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain -use mpp_domains_mod, only : mpp_redistribute, mpp_broadcast_domain -use mpp_domains_mod, only : set_domains_stack_size=>mpp_domains_set_stack_size -use diag_manager_mod, only : register_diag_field, diag_axis_init, send_data -use ensemble_manager_mod, only : get_ensemble_id, get_ensemble_size -use ensemble_manager_mod, only : get_ensemble_pelist, get_ensemble_filter_pelist -use time_manager_mod, only : time_type, decrement_time, increment_time -use time_manager_mod, only : get_date, operator(>=),operator(/=),operator(==),operator(<) -use constants_mod, only : radius, epsln 
+! This file is part of MOM6. see LICENSE.md for the license. + +! MOM infrastructure +use MOM_coms, only : PE_here, num_PEs +use MOM_coms, only : set_PElist, set_rootPE, Get_PElist, broadcast +use MOM_domains, only : domain2d, global_field, get_domain_extent +use MOM_domains, only : pass_var, redistribute_array, broadcast_domain +use MOM_diag_mediator, only : register_diag_field, diag_axis_init, post_data +use MOM_ensemble_manager, only : get_ensemble_id, get_ensemble_size +use MOM_ensemble_manager, only : get_ensemble_pelist, get_ensemble_filter_pelist +use MOM_error_handler, only : stdout, stdlog, MOM_error +use MOM_io, only : SINGLE_FILE +use MOM_time_manager, only : time_type, real_to_time, get_date +use MOM_time_manager, only : operator(+), operator(>=), operator(/=) +use MOM_time_manager, only : operator(==), operator(<) + ! ODA Modules use ocean_da_types_mod, only : grid_type, ocean_profile_type, ocean_control_struct use ocean_da_core_mod, only : ocean_da_core_init, get_profiles -!use eakf_oda_mod, only : ensemble_filter +#ifdef ENABLE_ECDA +use eakf_oda_mod, only : ensemble_filter +#endif use write_ocean_obs_mod, only : open_profile_file use write_ocean_obs_mod, only : write_profile,close_profile_file use kdtree, only : kd_root !# JEDI @@ -57,6 +56,11 @@ module MOM_oda_driver_mod #include +!> A structure with a pointer to a domain2d, to allow for the creation of arrays of pointers. +type :: ptr_mpp_domain + type(domain2d), pointer :: mpp_domain => NULL() !< pointer to a domain2d +end type ptr_mpp_domain + !> Control structure that contains a transpose of the ocean state across ensemble members. type, public :: ODA_CS ; private type(ocean_control_struct), pointer :: Ocean_prior=> NULL() !< ensemble ocean prior states in DA space @@ -64,7 +68,7 @@ module MOM_oda_driver_mod !! 
or increments to prior in DA space integer :: nk !< number of vertical layers used for DA type(ocean_grid_type), pointer :: Grid => NULL() !< MOM6 grid type and decomposition for the DA - type(ptr_mpp_domain), pointer, dimension(:) :: domains => NULL() !< Pointer to mpp_domain objects + type(MOM_domain_type), pointer, dimension(:) :: domains => NULL() !< Pointer to mpp_domain objects !! for ensemble members type(verticalGrid_type), pointer :: GV => NULL() !< vertical grid for DA type(unit_scale_type), pointer :: & @@ -98,10 +102,6 @@ module MOM_oda_driver_mod type(diag_ctrl) :: diag_cs ! A structure with a pointer to a domain2d, to allow for the creation of arrays of pointers. -type :: ptr_mpp_domain - type(domain2d), pointer :: mpp_domain => NULL() !< pointer to an mpp domain2d -end type ptr_mpp_domain !>@{ DA parameters integer, parameter :: NO_ASSIM = 0, OI_ASSIM=1, EAKF_ASSIM=2 @@ -130,6 +130,8 @@ subroutine init_oda(Time, G, GV, CS) type(param_file_type) :: PF integer :: n, m, k, i, j, nk integer :: is,ie,js,je,isd,ied,jsd,jed + integer :: isg,ieg,jsg,jeg + integer :: idg_offset, jdg_offset integer :: stdout_unit character(len=32) :: assim_method integer :: npes_pm, ens_info(6), ni, nj @@ -139,7 +141,7 @@ subroutine init_oda(Time, G, GV, CS) character(len=200) :: inputdir, basin_file logical :: reentrant_x, reentrant_y, tripolar_N, symmetric - if (associated(CS)) call mpp_error(FATAL, 'Calling oda_init with associated control structure') + if (associated(CS)) call MOM_error(FATAL, 'Calling oda_init with associated control structure') allocate(CS) ! Use ens1 parameters , this could be changed at a later time ! if it were desirable to have alternate parameters, e.g. 
for the grid @@ -182,7 +184,7 @@ subroutine init_oda(Time, G, GV, CS) case('no_assim') CS%assim_method = NO_ASSIM case default - call mpp_error(FATAL, 'Invalid assimilation method provided') + call MOM_error(FATAL, "Invalid assimilation method provided") end select ens_info = get_ensemble_size() @@ -195,16 +197,16 @@ subroutine init_oda(Time, G, GV, CS) call get_ensemble_pelist(CS%ensemble_pelist, 'ocean') call get_ensemble_filter_pelist(CS%filter_pelist, 'ocean') - call set_current_pelist(CS%filter_pelist) + call set_PElist(CS%filter_pelist) allocate(CS%domains(CS%ensemble_size)) CS%domains(CS%ensemble_id)%mpp_domain => G%Domain%mpp_domain do n=1,CS%ensemble_size if (.not. associated(CS%domains(n)%mpp_domain)) allocate(CS%domains(n)%mpp_domain) - call set_root_pe(CS%ensemble_pelist(n,1)) - call mpp_broadcast_domain(CS%domains(n)%mpp_domain) + call set_rootPE(CS%ensemble_pelist(n,1)) + call broadcast_domain(CS%domains(n)%mpp_domain) enddo - call set_root_pe(CS%filter_pelist(1)) + call set_rootPE(CS%filter_pelist(1)) allocate(CS%Grid) ! params NIHALO_ODA, NJHALO_ODA set the DA halo size call MOM_domains_init(CS%Grid%Domain,PF,param_suffix='_ODA') @@ -239,7 +241,12 @@ subroutine init_oda(Time, G, GV, CS) call initialize_regridding(CS%regridCS, CS%GV, CS%US, dG%max_depth,PF,'oda_driver',coord_mode,'','') call initialize_remapping(CS%remapCS,'PLM') call set_regrid_params(CS%regridCS, min_thickness=0.) - call mpp_get_data_domain(G%Domain%mpp_domain,isd,ied,jsd,jed) + ! breaking with the MOM6 convention and using global indices + call get_domain_extent(G%Domain,is,ie,js,je,isd,ied,jsd,jed,& + isg,ieg,jsg,jeg,idg_offset,jdg_offset,symmetric) + isd=isd+idg_offset; ied=ied+idg_offset + jsd=jsd+jdg_offset; jed=jed+jdg_offset + !call mpp_get_data_domain(G%Domain%mpp_domain,isd,ied,jsd,jed) if (.not. associated(CS%h)) then allocate(CS%h(isd:ied,jsd:jed,CS%GV%ke)); CS%h(:,:,:)=0.0 ! 
assign thicknesses @@ -247,10 +254,13 @@ subroutine init_oda(Time, G, GV, CS) endif allocate(CS%tv%T(isd:ied,jsd:jed,CS%GV%ke)); CS%tv%T(:,:,:)=0.0 allocate(CS%tv%S(isd:ied,jsd:jed,CS%GV%ke)); CS%tv%S(:,:,:)=0.0 - call set_axes_info(CS%Grid, CS%GV, CS%US, PF, CS%diag_cs, set_vertical=.true.) - - call mpp_get_data_domain(CS%mpp_domain,isd,ied,jsd,jed) + ! get domain extents for the analysis grid and use global indexing + !call get_domain_extent(CS%Grid%Domain,is,ie,js,je,isd,ied,jsd,jed,& + ! isg,ieg,jsg,jeg,idg_offset,jdg_offset,symmetric) + !isd=isd+idg_offset; ied=ied+idg_offset + !jsd=jsd+jdg_offset; jed=jed+jdg_offset + !call mpp_get_data_domain(CS%mpp_domain,isd,ied,jsd,jed) allocate(CS%oda_grid) CS%oda_grid%x => CS%Grid%geolonT CS%oda_grid%y => CS%Grid%geolatT @@ -268,9 +278,9 @@ subroutine init_oda(Time, G, GV, CS) allocate(T_grid%x(CS%ni,CS%nj)) allocate(T_grid%y(CS%ni,CS%nj)) allocate(T_grid%basin_mask(CS%ni,CS%nj)) - call mpp_global_field(CS%mpp_domain, CS%Grid%geolonT, T_grid%x) - call mpp_global_field(CS%mpp_domain, CS%Grid%geolatT, T_grid%y) - call mpp_global_field(CS%mpp_domain, CS%oda_grid%basin_mask, T_grid%basin_mask) + call global_field(CS%mpp_domain, CS%Grid%geolonT, T_grid%x) + call global_field(CS%mpp_domain, CS%Grid%geolatT, T_grid%y) + call global_field(CS%mpp_domain, CS%oda_grid%basin_mask, T_grid%basin_mask) T_grid%ni = CS%ni T_grid%nj = CS%nj T_grid%nk = CS%nk @@ -282,7 +292,7 @@ subroutine init_oda(Time, G, GV, CS) T_grid%z(:,:,:) = 0.0 do k = 1, CS%nk - call mpp_global_field(G%Domain%mpp_domain, CS%h(:,:,k), global2D) + call global_field(G%Domain%mpp_domain, CS%h(:,:,k), global2D) do i=1,CS%ni ; do j=1,CS%nj if ( global2D(i,j) > 1 ) then T_grid%mask(i,j,k) = 1.0 @@ -300,7 +310,7 @@ subroutine init_oda(Time, G, GV, CS) CS%Time=Time !! 
switch back to ensemble member pelist - call set_current_pelist(CS%ensemble_pelist(CS%ensemble_id,:)) + call set_PElist(CS%ensemble_pelist(CS%ensemble_id,:)) end subroutine init_oda !> Copy ensemble member tracers to ensemble vector. @@ -312,14 +322,15 @@ subroutine set_prior_tracer(Time, G, GV, h, tv, CS) type(thermo_var_ptrs), intent(in) :: tv !< A structure pointing to various thermodynamic variables type(ODA_CS), pointer :: CS !< ocean DA control structure - real, dimension(:,:,:), allocatable :: T, S + real, dimension(SZI_(G),SZJ_(G),CS%nk) :: T, S type(ocean_grid_type), pointer :: Grid=>NULL() integer :: i,j, m, n, ss integer :: is, ie, js, je integer :: isc, iec, jsc, jec integer :: isd, ied, jsd, jed + integer :: isg, ieg, jsg, jeg, idg_offset, jdg_offset integer :: id - logical :: used + logical :: used, symmetric ! return if not time for analysis if (Time < CS%Time) return @@ -328,32 +339,36 @@ subroutine set_prior_tracer(Time, G, GV, h, tv, CS) if (.not. associated(CS%GV)) call MOM_ERROR(FATAL,'ODA_CS ensemble vertical grid not associated') !! switch to global pelist - call set_current_pelist(CS%filter_pelist) + call set_PElist(CS%filter_pelist) call MOM_mesg('Setting prior') + ! computational domain for the analysis grid isc=CS%Grid%isc;iec=CS%Grid%iec;jsc=CS%Grid%jsc;jec=CS%Grid%jec - call mpp_get_compute_domain(CS%domains(CS%ensemble_id)%mpp_domain,is,ie,js,je) - call mpp_get_data_domain(CS%domains(CS%ensemble_id)%mpp_domain,isd,ied,jsd,jed) - allocate(T(isd:ied,jsd:jed,CS%nk)) - allocate(S(isd:ied,jsd:jed,CS%nk)) - - do j=js,je ; do i=is,ie + ! array extents for the ensemble member + !call get_domain_extent(CS%domains(CS%ensemble_id),is,ie,js,je,isd,ied,jsd,jed,& + ! isg,ieg,jsg,jeg,idg_offset,jdg_offset,symmetric) + ! 
remap temperature and salinity from the ensemble member to the analysis grid + do j=G%jsc,G%jec ; do i=G%isc,G%iec call remapping_core_h(CS%remapCS, GV%ke, h(i,j,:), tv%T(i,j,:), & CS%nk, CS%h(i,j,:), T(i,j,:)) call remapping_core_h(CS%remapCS, GV%ke, h(i,j,:), tv%S(i,j,:), & CS%nk, CS%h(i,j,:), S(i,j,:)) enddo ; enddo - + ! cast ensemble members to the analysis domain do m=1,CS%ensemble_size - call mpp_redistribute(CS%domains(m)%mpp_domain, T,& + call redistribute_array(CS%domains(m)%mpp_domain, T,& CS%mpp_domain, CS%Ocean_prior%T(:,:,:,m), complete=.true.) - call mpp_redistribute(CS%domains(m)%mpp_domain, S,& + call redistribute_array(CS%domains(m)%mpp_domain, S,& CS%mpp_domain, CS%Ocean_prior%S(:,:,:,m), complete=.true.) enddo - deallocate(T,S) + + do m=1,CS%ensemble_size + call pass_var(CS%Ocean_prior%T(:,:,:,m),CS%Grid%domain) + call pass_var(CS%Ocean_prior%S(:,:,:,m),CS%Grid%domain) + enddo !! switch back to ensemble member pelist - call set_current_pelist(CS%ensemble_pelist(CS%ensemble_id,:)) + call set_PElist(CS%ensemble_pelist(CS%ensemble_id,:)) return @@ -377,7 +392,7 @@ subroutine get_posterior_tracer(Time, CS, h, tv, increment) !! switch to global pelist - call set_current_pelist(CS%filter_pelist) + call set_PElist(CS%filter_pelist) call MOM_mesg('Getting posterior') get_inc = .true. @@ -391,26 +406,26 @@ subroutine get_posterior_tracer(Time, CS, h, tv, increment) endif do m=1,CS%ensemble_size if (get_inc) then - call mpp_redistribute(CS%mpp_domain, Ocean_increment%T(:,:,:,m), & - CS%domains(m)%mpp_domain, CS%tv%T, complete=.true.) - call mpp_redistribute(CS%mpp_domain, Ocean_increment%S(:,:,:,m), & - CS%domains(m)%mpp_domain, CS%tv%S, complete=.true.) + call redistribute_array(CS%mpp_domain, Ocean_increment%T(:,:,:,m),& + CS%domains(m)%mpp_domain, CS%tv%T, complete=.true.) + call redistribute_array(CS%mpp_domain, Ocean_increment%S(:,:,:,m),& + CS%domains(m)%mpp_domain, CS%tv%S, complete=.true.) 
else - call mpp_redistribute(CS%mpp_domain, CS%Ocean_posterior%T(:,:,:,m), & - CS%domains(m)%mpp_domain, CS%tv%T, complete=.true.) - call mpp_redistribute(CS%mpp_domain, CS%Ocean_posterior%S(:,:,:,m), & - CS%domains(m)%mpp_domain, CS%tv%S, complete=.true.) + call redistribute_array(CS%mpp_domain, CS%Ocean_posterior%T(:,:,:,m),& + CS%domains(m)%mpp_domain, CS%tv%T, complete=.true.) + call redistribute_array(CS%mpp_domain, CS%Ocean_posterior%S(:,:,:,m),& + CS%domains(m)%mpp_domain, CS%tv%S, complete=.true.) endif enddo tv => CS%tv h => CS%h !! switch back to ensemble member pelist - call set_current_pelist(CS%ensemble_pelist(CS%ensemble_id,:)) + call set_PElist(CS%ensemble_pelist(CS%ensemble_id,:)) end subroutine get_posterior_tracer -!> Gather observations and sall ODA routines +!> Gather observations and call ODA routines subroutine oda(Time, CS) type(time_type), intent(in) :: Time !< the current model time type(oda_CS), intent(inout) :: CS !< the ocean DA control structure @@ -422,7 +437,7 @@ subroutine oda(Time, CS) if ( Time >= CS%Time ) then !! switch to global pelist - call set_current_pelist(CS%filter_pelist) + call set_PElist(CS%filter_pelist) call get_profiles(Time, CS%Profiles, CS%CProfiles) #ifdef ENABLE_ECDA @@ -430,7 +445,7 @@ subroutine oda(Time, CS) #endif !! switch back to ensemble member pelist - call set_current_pelist(CS%ensemble_pelist(CS%ensemble_id,:)) + call set_PElist(CS%ensemble_pelist(CS%ensemble_id,:)) endif @@ -479,7 +494,8 @@ subroutine set_analysis_time(Time,CS) integer :: yr, mon, day, hr, min, sec if (Time >= CS%Time) then - CS%Time=increment_time(CS%Time,CS%assim_frequency*3600) + ! increment the analysis time to the next step converting to seconds + CS%Time = CS%Time + real_to_time(CS%US%T_to_s*(CS%assim_frequency*3600.)) call get_date(Time, yr, mon, day, hr, min, sec) write(mesg,*) 'Model Time: ', yr, mon, day, hr, min, sec @@ -505,11 +521,11 @@ subroutine save_obs_diff(filename,CS) integer :: fid ! 
profile file handle type(ocean_profile_type), pointer :: Prof=>NULL() - fid = open_profile_file(trim(filename), nvar=2, thread=MPP_SINGLE, fset=MPP_SINGLE) + fid = open_profile_file(trim(filename), nvar=2, thread=SINGLE_FILE, fset=SINGLE_FILE) Prof=>CS%CProfiles !! switch to global pelist - !call set_current_pelist(CS%filter_pelist) + !call set_PElist(CS%filter_pelist) do while (associated(Prof)) call write_profile(fid,Prof) @@ -518,7 +534,7 @@ subroutine save_obs_diff(filename,CS) call close_profile_file(fid) !! switch back to ensemble member pelist - !call set_current_pelist(CS%ensemble_pelist(CS%ensemble_id,:)) + !call set_PElist(CS%ensemble_pelist(CS%ensemble_id,:)) return end subroutine save_obs_diff diff --git a/src/parameterizations/vertical/MOM_ALE_sponge.F90 b/src/parameterizations/vertical/MOM_ALE_sponge.F90 index 64eb80acb5..548b1d04f4 100644 --- a/src/parameterizations/vertical/MOM_ALE_sponge.F90 +++ b/src/parameterizations/vertical/MOM_ALE_sponge.F90 @@ -13,18 +13,19 @@ module MOM_ALE_sponge ! This file is part of MOM6. See LICENSE.md for the license. 
use MOM_array_transform, only: rotate_array -use MOM_coms, only : sum_across_PEs +use MOM_coms, only : sum_across_PEs use MOM_diag_mediator, only : post_data, query_averaging_enabled, register_diag_field use MOM_diag_mediator, only : diag_ctrl use MOM_error_handler, only : MOM_error, FATAL, NOTE, WARNING, is_root_pe -use MOM_file_parser, only : get_param, log_param, log_version, param_file_type -use MOM_grid, only : ocean_grid_type +use MOM_file_parser, only : get_param, log_param, log_version, param_file_type +use MOM_grid, only : ocean_grid_type use MOM_horizontal_regridding, only : horiz_interp_and_extrap_tracer +use MOM_interpolate, only : init_external_field, get_external_field_info, time_interp_external_init +use MOM_remapping, only : remapping_cs, remapping_core_h, initialize_remapping use MOM_spatial_means, only : global_i_mean -use MOM_time_manager, only : time_type, init_external_field, get_external_field_size, time_interp_external_init -use MOM_remapping, only : remapping_cs, remapping_core_h, initialize_remapping -use MOM_unit_scaling, only : unit_scale_type -use MOM_verticalGrid, only : verticalGrid_type +use MOM_time_manager, only : time_type +use MOM_unit_scaling, only : unit_scale_type +use MOM_verticalGrid, only : verticalGrid_type implicit none ; private @@ -638,12 +639,12 @@ subroutine set_up_ALE_sponge_field_varying(filename, fieldname, Time, G, GV, US, ! get a unique time interp id for this field. If sponge data is ongrid, then setup ! 
to only read on the computational domain if (CS%spongeDataOngrid) then - CS%Ref_val(CS%fldno)%id = init_external_field(filename, fieldname,domain=G%Domain%mpp_domain) + CS%Ref_val(CS%fldno)%id = init_external_field(filename, fieldname, MOM_domain=G%Domain) else CS%Ref_val(CS%fldno)%id = init_external_field(filename, fieldname) endif fld_sz(1:4)=-1 - fld_sz = get_external_field_size(CS%Ref_val(CS%fldno)%id) + call get_external_field_info(CS%Ref_val(CS%fldno)%id, size=fld_sz) nz_data = fld_sz(3) CS%Ref_val(CS%fldno)%nz_data = nz_data !< individual sponge fields may reside on a different vertical grid CS%Ref_val(CS%fldno)%num_tlevs = fld_sz(4) @@ -735,12 +736,12 @@ subroutine set_up_ALE_sponge_vel_field_varying(filename_u, fieldname_u, filename ! to the current model date. CS%Ref_val_u%id = init_external_field(filename_u, fieldname_u) fld_sz(1:4)=-1 - fld_sz = get_external_field_size(CS%Ref_val_u%id) + call get_external_field_info(CS%Ref_val_u%id, size=fld_sz) CS%Ref_val_u%nz_data = fld_sz(3) CS%Ref_val_u%num_tlevs = fld_sz(4) CS%Ref_val_v%id = init_external_field(filename_v, fieldname_v) fld_sz(1:4)=-1 - fld_sz = get_external_field_size(CS%Ref_val_v%id) + call get_external_field_info(CS%Ref_val_v%id, size=fld_sz) CS%Ref_val_v%nz_data = fld_sz(3) CS%Ref_val_v%num_tlevs = fld_sz(4) allocate( u_val(isdB:iedB,jsd:jed, fld_sz(3)) ) diff --git a/src/parameterizations/vertical/MOM_diabatic_aux.F90 b/src/parameterizations/vertical/MOM_diabatic_aux.F90 index 8ba9dd959a..ee27c6c5df 100644 --- a/src/parameterizations/vertical/MOM_diabatic_aux.F90 +++ b/src/parameterizations/vertical/MOM_diabatic_aux.F90 @@ -15,8 +15,7 @@ module MOM_diabatic_aux use MOM_file_parser, only : get_param, log_param, log_version, param_file_type use MOM_forcing_type, only : forcing, extractFluxes1d, forcing_SinglePointPrint use MOM_grid, only : ocean_grid_type -use MOM_interpolate, only : init_external_field, time_interp_extern -use MOM_interpolate, only : time_interp_external_init +use 
MOM_interpolate, only : init_external_field, time_interp_external, time_interp_external_init use MOM_io, only : slasher use MOM_opacity, only : set_opacity, opacity_CS, extract_optics_slice, extract_optics_fields use MOM_opacity, only : optics_type, optics_nbands, absorbRemainingSW, sumSWoverBands @@ -621,7 +620,7 @@ subroutine set_pen_shortwave(optics, fluxes, G, GV, US, CS, opacity_CSp, tracer_ if (CS%chl_from_file) then ! Only the 2-d surface chlorophyll can be read in from a file. The ! same value is assumed for all layers. - call time_interp_extern(CS%sbc_chl, CS%Time, chl_2d) + call time_interp_external(CS%sbc_chl, CS%Time, chl_2d) do j=js,je ; do i=is,ie if ((G%mask2dT(i,j) > 0.5) .and. (chl_2d(i,j) < 0.0)) then write(mesg,'(" Time_interp negative chl of ",(1pe12.4)," at i,j = ",& @@ -1694,7 +1693,7 @@ subroutine diabatic_aux_init(Time, G, GV, US, param_file, diag, CS, useALEalgori call log_param(param_file, mdl, "INPUTDIR/CHL_FILE", chl_filename) call get_param(param_file, mdl, "CHL_VARNAME", chl_varname, & "Name of CHL_A variable in CHL_FILE.", default='CHL_A') - CS%sbc_chl = init_external_field(chl_filename, trim(chl_varname), domain=G%Domain%mpp_domain) + CS%sbc_chl = init_external_field(chl_filename, trim(chl_varname), MOM_domain=G%Domain) endif CS%id_chl = register_diag_field('ocean_model', 'Chl_opac', diag%axesT1, Time, & diff --git a/src/tracer/MOM_lateral_boundary_diffusion.F90 b/src/tracer/MOM_lateral_boundary_diffusion.F90 index 44e105d1cf..b51813678b 100644 --- a/src/tracer/MOM_lateral_boundary_diffusion.F90 +++ b/src/tracer/MOM_lateral_boundary_diffusion.F90 @@ -592,14 +592,13 @@ subroutine fluxes_layer_method(boundary, ke, hbl_L, hbl_R, h_L, h_R, phi_L, phi_ type(lbd_CS), pointer :: CS !< Lateral diffusion control structure !! the boundary layer ! 
Local variables - real, dimension(:), allocatable :: dz_top !< The LBD z grid to be created [L ~ m] - real, dimension(:), allocatable :: phi_L_z !< Tracer values in the ztop grid (left) [conc] - real, dimension(:), allocatable :: phi_R_z !< Tracer values in the ztop grid (right) [conc] - real, dimension(:), allocatable :: F_layer_z !< Diffusive flux at U- or V-points in the ztop grid - !! [H L2 conc ~> m3 conc] + real, dimension(:), allocatable :: dz_top !< The LBD z grid to be created [L ~ m] + real, dimension(:), allocatable :: phi_L_z !< Tracer values in the ztop grid (left) [conc] + real, dimension(:), allocatable :: phi_R_z !< Tracer values in the ztop grid (right) [conc] + real, dimension(:), allocatable :: F_layer_z !< Diffusive flux at U/V-point in the ztop grid [H L2 conc ~> m3 conc] real, dimension(ke) :: h_vel !< Thicknesses at u- and v-points in the native grid - !! The harmonic mean is used to avoid zero values [H ~> m or kg m-2] - real :: khtr_avg !< Thickness-weighted diffusivity at the u-point [m^2 s^-1] + !! The harmonic mean is used to avoid zero values [H ~> m or kg m-2] + real :: khtr_avg !< Thickness-weighted diffusivity at the u-point [m^2 s^-1] !! This is just to remind developers that khtr_avg should be !! computed once khtr is 3D. real :: htot !< Total column thickness [H ~> m or kg m-2] diff --git a/src/user/MOM_wave_interface.F90 b/src/user/MOM_wave_interface.F90 index 33a255b687..df4b2a30fd 100644 --- a/src/user/MOM_wave_interface.F90 +++ b/src/user/MOM_wave_interface.F90 @@ -9,6 +9,7 @@ module MOM_wave_interface use MOM_domains, only : To_South, To_West, To_All use MOM_error_handler, only : MOM_error, FATAL, WARNING use MOM_file_parser, only : get_param, log_version, param_file_type +use MOM_forcing_type, only : mech_forcing use MOM_grid, only : ocean_grid_type use MOM_safe_alloc, only : safe_alloc_ptr use MOM_time_manager, only : time_type, operator(+), operator(/) @@ -68,6 +69,9 @@ module MOM_wave_interface !! approach. ! 
Surface Wave Dependent 1d/2d/3d vars + integer, public :: NumBands =0 !< Number of wavenumber/frequency partitions to receive + !! This needs to match the number of bands provided + !! via either coupling or file. real, allocatable, dimension(:), public :: & WaveNum_Cen !< Wavenumber bands for read/coupled [m-1] real, allocatable, dimension(:), public :: & @@ -138,10 +142,6 @@ module MOM_wave_interface !! \todo Module variable! Move into a control structure. ! Options if WaveMethod is Surface Stokes Drift Bands (1) -integer, public :: NumBands =0 !< Number of wavenumber/frequency partitions to receive - !! This needs to match the number of bands provided - !! via either coupling or file. - !! \todo Module variable! Move into a control structure. integer, public :: PartitionMode !< Method for partition mode (meant to check input) !! 0 - wavenumbers !! 1 - frequencies @@ -299,22 +299,34 @@ subroutine MOM_wave_interface_init(time, G, GV, US, param_file, CS, diag ) "Filename of surface Stokes drift input band data.", default="StkSpec.nc") case (COUPLER_STRING)! Reserved for coupling DataSource = Coupler + ! This is just to make something work, but it needs to be read from the wavemodel. + call get_param(param_file,mdl,"STK_BAND_COUPLER",CS%NumBands, & + "STK_BAND_COUPLER is the number of Stokes drift bands in the coupler. "// & + "This has to be consistent with the number of Stokes drift bands in WW3, "//& + "or the model will fail.",units='', default=1) + allocate( CS%WaveNum_Cen(CS%NumBands) ) + allocate( CS%STKx0(G%isdB:G%iedB,G%jsd:G%jed,CS%NumBands)) + allocate( CS%STKy0(G%isdB:G%iedB,G%jsd:G%jed,CS%NumBands)) + CS%WaveNum_Cen(:) = 0.0 + CS%STKx0(:,:,:) = 0.0 + CS%STKy0(:,:,:) = 0.0 + partitionmode = 0 case (INPUT_STRING)! 
A method to input the Stokes band (globally uniform) DataSource = Input - call get_param(param_file,mdl,"SURFBAND_NB",NumBands, & + call get_param(param_file,mdl,"SURFBAND_NB",CS%NumBands, & "Prescribe number of wavenumber bands for Stokes drift. "// & "Make sure this is consistnet w/ WAVENUMBERS, STOKES_X, and "// & "STOKES_Y, there are no safety checks in the code.", & units='', default=1) - allocate( CS%WaveNum_Cen(1:NumBands) ) + allocate( CS%WaveNum_Cen(1:CS%NumBands) ) CS%WaveNum_Cen(:) = 0.0 - allocate( CS%PrescribedSurfStkX(1:NumBands)) + allocate( CS%PrescribedSurfStkX(1:CS%NumBands)) CS%PrescribedSurfStkX(:) = 0.0 - allocate( CS%PrescribedSurfStkY(1:NumBands)) + allocate( CS%PrescribedSurfStkY(1:CS%NumBands)) CS%PrescribedSurfStkY(:) = 0.0 - allocate( CS%STKx0(G%isdB:G%iedB,G%jsd:G%jed,1:NumBands)) + allocate( CS%STKx0(G%isdB:G%iedB,G%jsd:G%jed,1:CS%NumBands)) CS%STKx0(:,:,:) = 0.0 - allocate( CS%STKy0(G%isd:G%ied,G%jsdB:G%jedB,1:NumBands)) + allocate( CS%STKy0(G%isd:G%ied,G%jsdB:G%jedB,1:CS%NumBands)) CS%STKy0(:,:,:) = 0.0 partitionmode=0 call get_param(param_file,mdl,"SURFBAND_WAVENUMBERS",CS%WaveNum_Cen, & @@ -432,13 +444,14 @@ subroutine MOM_wave_interface_init_lite(param_file) end subroutine MOM_wave_interface_init_lite !> Subroutine that handles updating of surface wave/Stokes drift related properties -subroutine Update_Surface_Waves(G, GV, US, Day, dt, CS) +subroutine Update_Surface_Waves(G, GV, US, Day, dt, CS, forces) type(wave_parameters_CS), pointer :: CS !< Wave parameter Control structure type(ocean_grid_type), intent(inout) :: G !< Grid structure type(verticalGrid_type), intent(in) :: GV !< Vertical grid structure type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(time_type), intent(in) :: Day !< Current model time type(time_type), intent(in) :: dt !< Timestep as a time-type + type(mech_forcing), intent(in), optional :: forces !< MOM_forcing_type ! 
Local variables integer :: ii, jj, kk, b type(time_type) :: Day_Center @@ -452,16 +465,42 @@ subroutine Update_Surface_Waves(G, GV, US, Day, dt, CS) if (DataSource==DATAOVR) then call Surface_Bands_by_data_override(day_center, G, GV, US, CS) elseif (DataSource==Coupler) then - ! Reserve for coupler hooks + if (.not.present(FORCES)) then + call MOM_error(FATAL,"The option SURFBAND = COUPLER can not be used with "//& + "this driver. If you are using a coupled driver with a wave model then "//& + "check the arguments in the subroutine call to Update_Surface_Waves, "//& + "otherwise select another option for SURFBAND_SOURCE.") + endif + if (size(CS%WaveNum_Cen).ne.size(forces%stk_wavenumbers)) then + call MOM_error(FATAL, "Number of wavenumber bands in WW3 does not match that in MOM6. "//& + "Make sure that STK_BAND_COUPLER in MOM6 input is equal to the number of bands in "//& + "ww3_grid.inp, and that your mod_def.ww3 is up to date.") + endif + + do b=1,CS%NumBands + CS%WaveNum_Cen(b) = forces%stk_wavenumbers(b) + !Interpolate from a grid to c grid + do jj=G%jsc,G%jec + do II=G%iscB,G%iecB + CS%STKx0(II,jj,b) = 0.5*(forces%UStkb(ii,jj,b)+forces%UStkb(ii+1,jj,b)) + enddo + enddo + do JJ=G%jscB, G%jecB + do ii=G%isc,G%iec + CS%STKY0(ii,JJ,b) = 0.5*(forces%VStkb(ii,jj,b)+forces%VStkb(ii,jj+1,b)) + enddo + enddo + call pass_vector(CS%STKx0(:,:,b),CS%STKy0(:,:,b), G%Domain) + enddo elseif (DataSource==Input) then - do b=1,NumBands - do II=G%isdB,G%iedB - do jj=G%jsd,G%jed + do b=1,CS%NumBands + do jj=G%jsd,G%jed + do II=G%isdB,G%iedB CS%STKx0(II,jj,b) = CS%PrescribedSurfStkX(b) enddo enddo - do ii=G%isd,G%ied - do JJ=G%jsdB, G%jedB + do JJ=G%jsdB, G%jedB + do ii=G%isd,G%ied CS%STKY0(ii,JJ,b) = CS%PrescribedSurfStkY(b) enddo enddo @@ -484,20 +523,21 @@ subroutine Update_Stokes_Drift(G, GV, US, CS, h, ustar) real, dimension(SZI_(G),SZJ_(G)), & intent(in) :: ustar !< Wind friction velocity [Z T-1 ~> m s-1]. ! 
Local Variables - real :: Top, MidPoint, Bottom, one_cm + real :: Top, MidPoint, Bottom, one_cm, level_thick, min_level_thick_avg real :: DecayScale real :: CMN_FAC, WN, UStokes real :: La integer :: ii, jj, kk, b, iim1, jjm1 one_cm = 0.01*US%m_to_Z + min_level_thick_avg = 1.e-3*US%m_to_Z ! 1. If Test Profile Option is chosen ! Computing mid-point value from surface value and decay wavelength if (WaveMethod==TESTPROF) then DecayScale = 4.*PI / TP_WVL !4pi - do II = G%isdB,G%iedB - do jj = G%jsd,G%jed + do jj = G%jsd,G%jed + do II = G%isdB,G%iedB IIm1 = max(1,II-1) Bottom = 0.0 MidPoint = 0.0 @@ -509,8 +549,8 @@ subroutine Update_Stokes_Drift(G, GV, US, CS, h, ustar) enddo enddo enddo - do ii = G%isd,G%ied - do JJ = G%jsdB,G%jedB + do JJ = G%jsdB,G%jedB + do ii = G%isd,G%ied JJm1 = max(1,JJ-1) Bottom = 0.0 MidPoint = 0.0 @@ -531,11 +571,11 @@ subroutine Update_Stokes_Drift(G, GV, US, CS, h, ustar) CS%Us0_x(:,:) = 0.0 CS%Us0_y(:,:) = 0.0 ! Computing X direction Stokes drift - do II = G%isdB,G%iedB - do jj = G%jsd,G%jed + do jj = G%jsd,G%jed + do II = G%isdB,G%iedB ! 1. First compute the surface Stokes drift ! by integrating over the partitionas. - do b = 1,NumBands + do b = 1,CS%NumBands if (PartitionMode==0) then ! In wavenumber we are averaging over (small) level CMN_FAC = (1.0-exp(-one_cm*2.*CS%WaveNum_Cen(b))) / & @@ -551,34 +591,48 @@ subroutine Update_Stokes_Drift(G, GV, US, CS, h, ustar) do kk = 1,GV%ke Top = Bottom IIm1 = max(II-1,1) - MidPoint = Bottom - GV%H_to_Z*0.25*(h(II,jj,kk)+h(IIm1,jj,kk)) - Bottom = Bottom - GV%H_to_Z*0.5*(h(II,jj,kk)+h(IIm1,jj,kk)) - do b = 1,NumBands - if (PartitionMode==0) then + level_thick = 0.5*GV%H_to_Z*(h(II,jj,kk)+h(IIm1,jj,kk)) + MidPoint = Bottom - 0.5*level_thick + Bottom = Bottom - level_thick + ! -> Stokes drift in thin layers not averaged. + if (level_thick>min_level_thick_avg) then + do b = 1,CS%NumBands + if (PartitionMode==0) then ! 
In wavenumber we are averaging over level - CMN_FAC = (exp(Top*2.*CS%WaveNum_Cen(b))-exp(Bottom*2.*CS%WaveNum_Cen(b)))& - / ((Top-Bottom)*(2.*CS%WaveNum_Cen(b))) - elseif (PartitionMode==1) then - if (CS%StkLevelMode==0) then - ! Take the value at the midpoint - CMN_FAC = exp(MidPoint*2.*(2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2/(US%L_to_Z**2*GV%g_Earth)) - elseif (CS%StkLevelMode==1) then - ! Use a numerical integration and then - ! divide by layer thickness - WN = (2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2 / (US%L_to_Z**2*GV%g_Earth) !bgr bug-fix missing g - CMN_FAC = (exp(2.*WN*Top)-exp(2.*WN*Bottom)) / (2.*WN*(Top-Bottom)) + CMN_FAC = (exp(Top*2.*CS%WaveNum_Cen(b))-exp(Bottom*2.*CS%WaveNum_Cen(b)))& + / ((Top-Bottom)*(2.*CS%WaveNum_Cen(b))) + elseif (PartitionMode==1) then + if (CS%StkLevelMode==0) then + ! Take the value at the midpoint + CMN_FAC = exp(MidPoint*2.*(2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2/(US%L_to_Z**2*GV%g_Earth)) + elseif (CS%StkLevelMode==1) then + ! Use a numerical integration and then + ! divide by layer thickness + WN = (2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2 / (US%L_to_Z**2*GV%g_Earth) !bgr bug-fix missing g + CMN_FAC = (exp(2.*WN*Top)-exp(2.*WN*Bottom)) / (2.*WN*(Top-Bottom)) + endif endif - endif - CS%US_x(II,jj,kk) = CS%US_x(II,jj,kk) + CS%STKx0(II,jj,b)*CMN_FAC - enddo + CS%US_x(II,jj,kk) = CS%US_x(II,jj,kk) + CS%STKx0(II,jj,b)*CMN_FAC + enddo + else + ! Take the value at the midpoint + do b = 1,CS%NumBands + if (PartitionMode==0) then + CMN_FAC = exp(MidPoint*2.*CS%WaveNum_Cen(b)) + elseif (PartitionMode==1) then + CMN_FAC = exp(MidPoint*2.*(2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2/(US%L_to_Z**2*GV%g_Earth)) + endif + CS%US_x(II,jj,kk) = CS%US_x(II,jj,kk) + CS%STKx0(II,jj,b)*CMN_FAC + enddo + endif enddo enddo enddo ! Computing Y direction Stokes drift - do ii = G%isd,G%ied - do JJ = G%jsdB,G%jedB + do JJ = G%jsdB,G%jedB + do ii = G%isd,G%ied ! Compute the surface values. - do b = 1,NumBands + do b = 1,CS%NumBands if (PartitionMode==0) then ! 
In wavenumber we are averaging over (small) level CMN_FAC = (1.0-exp(-one_cm*2.*CS%WaveNum_Cen(b))) / & @@ -594,34 +648,47 @@ subroutine Update_Stokes_Drift(G, GV, US, CS, h, ustar) do kk = 1,GV%ke Top = Bottom JJm1 = max(JJ-1,1) - MidPoint = Bottom - GV%H_to_Z*0.25*(h(ii,JJ,kk)+h(ii,JJm1,kk)) - Bottom = Bottom - GV%H_to_Z*0.5*(h(ii,JJ,kk)+h(ii,JJm1,kk)) - do b = 1,NumBands - if (PartitionMode==0) then + level_thick = 0.5*GV%H_to_Z*(h(ii,JJ,kk)+h(ii,JJm1,kk)) + MidPoint = Bottom - 0.5*level_thick + Bottom = Bottom - level_thick + ! -> Stokes drift in thin layers not averaged. + if (level_thick>min_level_thick_avg) then + do b = 1,CS%NumBands + if (PartitionMode==0) then ! In wavenumber we are averaging over level - CMN_FAC = (exp(Top*2.*CS%WaveNum_Cen(b)) - & - exp(Bottom*2.*CS%WaveNum_Cen(b))) / & - ((Top-Bottom)*(2.*CS%WaveNum_Cen(b))) - elseif (PartitionMode==1) then - if (CS%StkLevelMode==0) then - ! Take the value at the midpoint - CMN_FAC = exp(MidPoint*2.*(2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2/(US%L_to_Z**2*GV%g_Earth)) - elseif (CS%StkLevelMode==1) then - ! Use a numerical integration and then - ! divide by layer thickness - WN = (2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2 / (US%L_to_Z**2*GV%g_Earth) - CMN_FAC = (exp(2.*WN*Top)-exp(2.*WN*Bottom)) / (2.*WN*(Top-Bottom)) + CMN_FAC = (exp(Top*2.*CS%WaveNum_Cen(b))-exp(Bottom*2.*CS%WaveNum_Cen(b)))& + / ((Top-Bottom)*(2.*CS%WaveNum_Cen(b))) + elseif (PartitionMode==1) then + if (CS%StkLevelMode==0) then + ! Take the value at the midpoint + CMN_FAC = exp(MidPoint*2.*(2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2/(US%L_to_Z**2*GV%g_Earth)) + elseif (CS%StkLevelMode==1) then + ! Use a numerical integration and then + ! 
divide by layer thickness + WN = (2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2 / (US%L_to_Z**2*GV%g_Earth) !bgr bug-fix missing g + CMN_FAC = (exp(2.*WN*Top)-exp(2.*WN*Bottom)) / (2.*WN*(Top-Bottom)) + endif endif - endif - CS%US_y(ii,JJ,kk) = CS%US_y(ii,JJ,kk) + CS%STKy0(ii,JJ,b)*CMN_FAC - enddo + CS%US_y(ii,JJ,kk) = CS%US_y(ii,JJ,kk) + CS%STKy0(ii,JJ,b)*CMN_FAC + enddo + else + ! Take the value at the midpoint + do b = 1,CS%NumBands + if (PartitionMode==0) then + CMN_FAC = exp(MidPoint*2.*CS%WaveNum_Cen(b)) + elseif (PartitionMode==1) then + CMN_FAC = exp(MidPoint*2.*(2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2/(US%L_to_Z**2*GV%g_Earth)) + endif + CS%US_y(ii,JJ,kk) = CS%US_y(ii,JJ,kk) + CS%STKy0(ii,JJ,b)*CMN_FAC + enddo + endif enddo enddo enddo elseif (WaveMethod==DHH85) then if (.not.(StaticWaves .and. DHH85_is_set)) then - do II = G%isdB,G%iedB - do jj = G%jsd,G%jed + do jj = G%jsd,G%jed + do II = G%isdB,G%iedB bottom = 0.0 do kk = 1,GV%ke Top = Bottom @@ -638,8 +705,8 @@ subroutine Update_Stokes_Drift(G, GV, US, CS, h, ustar) enddo enddo enddo - do ii = G%isd,G%ied - do JJ = G%jsdB,G%jedB + do JJ = G%jsdB,G%jedB + do ii = G%isd,G%ied Bottom = 0.0 do kk=1, GV%ke Top = Bottom @@ -664,13 +731,13 @@ subroutine Update_Stokes_Drift(G, GV, US, CS, h, ustar) endif else! Keep this else, fallback to 0 Stokes drift do kk= 1,GV%ke - do II = G%isdB,G%iedB - do jj = G%jsd,G%jed + do jj = G%jsd,G%jed + do II = G%isdB,G%iedB CS%Us_x(II,jj,kk) = 0. enddo enddo - do ii = G%isd,G%ied - do JJ = G%jsdB,G%jedB + do JJ = G%jsdB,G%jedB + do ii = G%isd,G%ied CS%Us_y(ii,JJ,kk) = 0. enddo enddo @@ -680,8 +747,8 @@ subroutine Update_Stokes_Drift(G, GV, US, CS, h, ustar) ! Turbulent Langmuir number is computed here and available to use anywhere. ! SL Langmuir number requires mixing layer depth, and therefore is computed ! in the routine it is needed by (e.g. KPP or ePBL). 
- do ii = G%isc,G%iec - do jj = G%jsc, G%jec + do jj = G%jsc, G%jec + do ii = G%isc,G%iec Top = h(ii,jj,1)*GV%H_to_Z call get_Langmuir_Number( La, G, GV, US, Top, ustar(ii,jj), ii, jj, & H(ii,jj,:),Override_MA=.false.,WAVES=CS) @@ -812,8 +879,8 @@ subroutine Surface_Bands_by_data_override(day_center, G, GV, US, CS) trim(varread1)//",dim_name "//trim(dim_name(1))// & " in file "// trim(SurfBandFileName)//" in MOM_wave_interface") endif - NUMBANDS = ID - do B = 1,NumBands ; CS%WaveNum_Cen(b) = US%Z_to_m*CS%WaveNum_Cen(b) ; enddo + CS%NUMBANDS = ID + do B = 1,CS%NumBands ; CS%WaveNum_Cen(b) = US%Z_to_m*CS%WaveNum_Cen(b) ; enddo elseif (PartitionMode==1) then rcode_fr = NF90_GET_VAR(ncid, dim_id(1), CS%Freq_Cen, start, counter) if (rcode_fr /= 0) then @@ -822,15 +889,15 @@ subroutine Surface_Bands_by_data_override(day_center, G, GV, US, CS) trim(varread2)//",dim_name "//trim(dim_name(1))// & " in file "// trim(SurfBandFileName)//" in MOM_wave_interface") endif - NUMBANDS = ID - do B = 1,NumBands + CS%NUMBANDS = ID + do B = 1,CS%NumBands CS%WaveNum_Cen(b) = (2.*PI*CS%Freq_Cen(b)*US%T_to_s)**2 / (US%L_to_Z**2*GV%g_Earth) enddo endif endif - do b = 1,NumBands + do b = 1,CS%NumBands temp_x(:,:) = 0.0 temp_y(:,:) = 0.0 varname = ' ' @@ -904,9 +971,10 @@ subroutine get_Langmuir_Number( LA, G, GV, US, HBL, ustar, i, j, & real :: LA_STKx, LA_STKy, LA_STK ! Stokes velocities in [m s-1] logical :: ContinueLoop, USE_MA real, dimension(SZK_(GV)) :: US_H, VS_H - real, dimension(NumBands) :: StkBand_X, StkBand_Y + real, allocatable :: StkBand_X(:), StkBand_Y(:) integer :: KK, BB + ! 
Compute averaging depth for Stokes drift (negative) Dpt_LASL = min(-0.1*US%m_to_Z, -LA_FracHBL*HBL) @@ -940,13 +1008,15 @@ subroutine get_Langmuir_Number( LA, G, GV, US, HBL, ustar, i, j, & call Get_SL_Average_Prof( GV, Dpt_LASL, H, VS_H, LA_STKy) LA_STK = sqrt(LA_STKX*LA_STKX+LA_STKY*LA_STKY) elseif (WaveMethod==SURFBANDS) then - do bb = 1,NumBands + allocate(StkBand_X(WAVES%NumBands), StkBand_Y(WAVES%NumBands)) + do bb = 1,WAVES%NumBands StkBand_X(bb) = 0.5*(WAVES%STKx0(I,j,bb)+WAVES%STKx0(I-1,j,bb)) StkBand_Y(bb) = 0.5*(WAVES%STKy0(i,J,bb)+WAVES%STKy0(i,J-1,bb)) enddo - call Get_SL_Average_Band(GV, Dpt_LASL, NumBands, WAVES%WaveNum_Cen, StkBand_X, LA_STKx ) - call Get_SL_Average_Band(GV, Dpt_LASL, NumBands, WAVES%WaveNum_Cen, StkBand_Y, LA_STKy ) + call Get_SL_Average_Band(GV, Dpt_LASL, WAVES%NumBands, WAVES%WaveNum_Cen, StkBand_X, LA_STKx ) + call Get_SL_Average_Band(GV, Dpt_LASL, WAVES%NumBands, WAVES%WaveNum_Cen, StkBand_Y, LA_STKy ) LA_STK = sqrt(LA_STKX**2 + LA_STKY**2) + deallocate(StkBand_X, StkBand_Y) elseif (WaveMethod==DHH85) then ! Temporarily integrating profile rather than spectrum for simplicity do kk = 1,GV%ke @@ -1022,6 +1092,8 @@ subroutine get_StokesSL_LiFoxKemper(ustar, hbl, GV, US, UStokes_SL, LA) real :: z0, z0i, r1, r2, r3, r4, tmp, lasl_sqr_i real :: u10 + UStokes_sl = 0.0 + LA=1.e8 if (ustar > 0.0) then ! Computing u10 based on u_star and COARE 3.5 relationships call ust_2_u10_coare3p5(US%Z_to_m*US%s_to_T*ustar*sqrt(US%R_to_kg_m3*GV%Rho0/1.225), u10, GV, US) @@ -1069,10 +1141,7 @@ subroutine get_StokesSL_LiFoxKemper(ustar, hbl, GV, US, UStokes_SL, LA) sqrt( 2.0 * PI *kstar * z0) * & erfc( sqrt( 2.0 * kstar * z0 ) ) UStokes_sl = UStokes * (0.715 + r1 + r2 + r3 + r4) - LA = sqrt(US%Z_to_m*US%s_to_T*ustar / UStokes_sl) - else - UStokes_sl = 0.0 - LA=1.e8 + if(UStokes_sl .ne. 0.0)LA = sqrt(US%Z_to_m*US%s_to_T*ustar / UStokes_sl) endif end subroutine Get_StokesSL_LiFoxKemper