From 1a3f42a253247dee8311c8922f42fd6d2498d8ae Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Mon, 18 Jul 2022 14:40:32 -0400 Subject: [PATCH 01/40] Call set_initialized after query_initialized Call set_initialized for variables after they are initialized, when their lack of initialization was detected via query_initialized. This commit reproduces the model's behavior prior to the removal of the code that set the initialized flag within query_initialized in https://github.com/NOAA-GFDL/MOM6/pull/149, and it completes the set of changes that was envisioned when set_initialized was added in https://github.com/NOAA-GFDL/MOM6/pull/152. All answers are bitwise identical. --- src/core/MOM.F90 | 10 +++++---- src/core/MOM_dynamics_split_RK2.F90 | 27 ++++++++++++++++-------- src/framework/MOM_restart.F90 | 3 ++- src/ice_shelf/MOM_ice_shelf.F90 | 2 +- src/ice_shelf/MOM_ice_shelf_dynamics.F90 | 3 +-- src/tracer/MOM_CFC_cap.F90 | 6 ++++-- src/tracer/MOM_OCMIP2_CFC.F90 | 10 ++++++--- src/tracer/MOM_generic_tracer.F90 | 4 +++- src/tracer/advection_test_tracer.F90 | 4 +++- src/tracer/boundary_impulse_tracer.F90 | 3 ++- src/tracer/ideal_age_example.F90 | 3 ++- src/tracer/nw2_tracers.F90 | 5 +++-- src/tracer/oil_tracer.F90 | 4 ++-- src/tracer/pseudo_salt_tracer.F90 | 3 ++- src/user/MOM_wave_interface.F90 | 2 +- 15 files changed, 57 insertions(+), 32 deletions(-) diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index 0e87a1fb3e..29bef6bcd8 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -40,8 +40,8 @@ module MOM use MOM_io, only : MOM_io_init, vardesc, var_desc use MOM_io, only : slasher, file_exists, MOM_read_data use MOM_obsolete_params, only : find_obsolete_params -use MOM_restart, only : register_restart_field, register_restart_pair -use MOM_restart, only : query_initialized, save_restart, restart_registry_lock +use MOM_restart, only : register_restart_field, register_restart_pair, save_restart +use MOM_restart, only : query_initialized, set_initialized, restart_registry_lock use MOM_restart, only : restart_init, is_new_run, determine_is_new_run, MOM_restart_CS use MOM_spatial_means, only : global_mass_integral use MOM_time_manager, only : time_type, real_to_time, time_type_to_real, operator(+) @@ -2926,11 +2926,12 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & endif else CS%tv%frazil(:,:) = 0.0 + call set_initialized(CS%tv%frazil, "frazil", restart_CSp) endif endif if (CS%interp_p_surf) then - CS%p_surf_prev_set = query_initialized(CS%p_surf_prev,"p_surf_prev",restart_CSp) + CS%p_surf_prev_set = query_initialized(CS%p_surf_prev, "p_surf_prev", restart_CSp) if (CS%p_surf_prev_set) then ! Test whether the dimensional rescaling has changed for pressure. @@ -2958,7 +2959,7 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & endif endif - if (query_initialized(CS%ave_ssh_ibc,"ave_ssh",restart_CSp)) then + if (query_initialized(CS%ave_ssh_ibc, "ave_ssh", restart_CSp)) then if ((US%m_to_Z_restart /= 0.0) .and. 
(US%m_to_Z_restart /= 1.0) ) then Z_rescale = 1.0 / US%m_to_Z_restart do j=js,je ; do i=is,ie @@ -2971,6 +2972,7 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & else call find_eta(CS%h, CS%tv, G, GV, US, CS%ave_ssh_ibc, dZref=G%Z_ref) endif + call set_initialized(CS%ave_ssh_ibc, "ave_ssh", restart_CSp) endif if (CS%split) deallocate(eta) diff --git a/src/core/MOM_dynamics_split_RK2.F90 b/src/core/MOM_dynamics_split_RK2.F90 index 003033659e..c011d18c44 100644 --- a/src/core/MOM_dynamics_split_RK2.F90 +++ b/src/core/MOM_dynamics_split_RK2.F90 @@ -30,7 +30,7 @@ module MOM_dynamics_split_RK2 use MOM_get_input, only : directories use MOM_io, only : vardesc, var_desc use MOM_restart, only : register_restart_field, register_restart_pair -use MOM_restart, only : query_initialized, save_restart +use MOM_restart, only : query_initialized, set_initialized, save_restart use MOM_restart, only : restart_init, is_new_run, MOM_restart_CS use MOM_time_manager, only : time_type, time_type_to_real, operator(+) use MOM_time_manager, only : operator(-), operator(>), operator(*), operator(/) @@ -1131,7 +1131,7 @@ subroutine initialize_dyn_split_RK2(u, v, h, uh, vh, eta, Time, G, GV, US, param type(param_file_type), intent(in) :: param_file !< parameter file for parsing type(diag_ctrl), target, intent(inout) :: diag !< to control diagnostics type(MOM_dyn_split_RK2_CS), pointer :: CS !< module control structure - type(MOM_restart_CS), intent(in) :: restart_CS !< MOM restart control structure + type(MOM_restart_CS), intent(inout) :: restart_CS !< MOM restart control structure real, intent(in) :: dt !< time step [T ~> s] type(accel_diag_ptrs), target, intent(inout) :: Accel_diag !< points to momentum equation terms for !! budget analysis @@ -1304,6 +1304,7 @@ subroutine initialize_dyn_split_RK2(u, v, h, uh, vh, eta, Time, G, GV, US, param do k=1,nz ; do j=js,je ; do i=is,ie CS%eta(i,j) = CS%eta(i,j) + h(i,j,k) enddo ; enddo ; enddo + call set_initialized(CS%eta, trim(eta_rest_name), restart_CS) elseif ((GV%m_to_H_restart /= 0.0) .and. (GV%m_to_H_restart /= 1.0)) then H_rescale = 1.0 / GV%m_to_H_restart do j=js,je ; do i=is,ie ; CS%eta(i,j) = H_rescale * CS%eta(i,j) ; enddo ; enddo @@ -1315,10 +1316,12 @@ subroutine initialize_dyn_split_RK2(u, v, h, uh, vh, eta, Time, G, GV, US, param CS%barotropic_CSp, restart_CS, calc_dtbt, CS%BT_cont, & CS%tides_CSp) - if (.not. query_initialized(CS%diffu,"diffu",restart_CS) .or. & - .not. query_initialized(CS%diffv,"diffv",restart_CS)) then + if (.not. query_initialized(CS%diffu, "diffu", restart_CS) .or. & + .not. query_initialized(CS%diffv, "diffv", restart_CS)) then call horizontal_viscosity(u, v, h, CS%diffu, CS%diffv, MEKE, VarMix, G, GV, US, CS%hor_visc, & OBC=CS%OBC, BT=CS%barotropic_CSp, TD=thickness_diffuse_CSp) + call set_initialized(CS%diffu, "diffu", restart_CS) + call set_initialized(CS%diffv, "diffv", restart_CS) else if ( (US%s_to_T_restart * US%m_to_L_restart /= 0.0) .and. & (US%s_to_T_restart**2 /= US%m_to_L_restart) ) then @@ -1332,10 +1335,12 @@ subroutine initialize_dyn_split_RK2(u, v, h, uh, vh, eta, Time, G, GV, US, param endif endif - if (.not. query_initialized(CS%u_av,"u2", restart_CS) .or. & - .not. query_initialized(CS%u_av,"v2", restart_CS)) then + if (.not. query_initialized(CS%u_av, "u2", restart_CS) .or. & + .not. 
query_initialized(CS%v_av, "v2", restart_CS)) then do k=1,nz ; do j=jsd,jed ; do I=IsdB,IedB ; CS%u_av(I,j,k) = u(I,j,k) ; enddo ; enddo ; enddo do k=1,nz ; do J=JsdB,JedB ; do i=isd,ied ; CS%v_av(i,J,k) = v(i,J,k) ; enddo ; enddo ; enddo + call set_initialized(CS%u_av, "u2", restart_CS) + call set_initialized(CS%v_av, "v2", restart_CS) elseif ( (US%s_to_T_restart * US%m_to_L_restart /= 0.0) .and. & (US%s_to_T_restart /= US%m_to_L_restart) ) then vel_rescale = US%s_to_T_restart / US%m_to_L_restart @@ -1344,17 +1349,21 @@ subroutine initialize_dyn_split_RK2(u, v, h, uh, vh, eta, Time, G, GV, US, param endif ! This call is just here to initialize uh and vh. - if (.not. query_initialized(uh,"uh",restart_CS) .or. & - .not. query_initialized(vh,"vh",restart_CS)) then + if (.not. query_initialized(uh, "uh", restart_CS) .or. & + .not. query_initialized(vh, "vh", restart_CS)) then do k=1,nz ; do j=jsd,jed ; do i=isd,ied ; h_tmp(i,j,k) = h(i,j,k) ; enddo ; enddo ; enddo call continuity(u, v, h, h_tmp, uh, vh, dt, G, GV, US, CS%continuity_CSp, CS%OBC, pbv) call pass_var(h_tmp, G%Domain, clock=id_clock_pass_init) do k=1,nz ; do j=jsd,jed ; do i=isd,ied CS%h_av(i,j,k) = 0.5*(h(i,j,k) + h_tmp(i,j,k)) enddo ; enddo ; enddo + call set_initialized(uh, "uh", restart_CS) + call set_initialized(vh, "vh", restart_CS) + call set_initialized(CS%h_av, "h2", restart_CS) else - if (.not. query_initialized(CS%h_av,"h2",restart_CS)) then + if (.not. query_initialized(CS%h_av, "h2", restart_CS)) then CS%h_av(:,:,:) = h(:,:,:) + call set_initialized(CS%h_av, "h2", restart_CS) elseif ((GV%m_to_H_restart /= 0.0) .and. (GV%m_to_H_restart /= 1.0)) then H_rescale = 1.0 / GV%m_to_H_restart do k=1,nz ; do j=js,je ; do i=is,ie ; CS%h_av(i,j,k) = H_rescale * CS%h_av(i,j,k) ; enddo ; enddo ; enddo diff --git a/src/framework/MOM_restart.F90 b/src/framework/MOM_restart.F90 index 7081bbd0fb..6eba9be727 100644 --- a/src/framework/MOM_restart.F90 +++ b/src/framework/MOM_restart.F90 @@ -22,7 +22,8 @@ module MOM_restart implicit none ; private public restart_init, restart_end, restore_state, register_restart_field -public save_restart, query_initialized, restart_registry_lock, restart_init_end, vardesc +public save_restart, query_initialized, set_initialized +public restart_registry_lock, restart_init_end, vardesc public restart_files_exist, determine_is_new_run, is_new_run public register_restart_field_as_obsolete, register_restart_pair diff --git a/src/ice_shelf/MOM_ice_shelf.F90 b/src/ice_shelf/MOM_ice_shelf.F90 index 8c2f7dd4c9..aaa0830273 100644 --- a/src/ice_shelf/MOM_ice_shelf.F90 +++ b/src/ice_shelf/MOM_ice_shelf.F90 @@ -34,7 +34,7 @@ module MOM_ice_shelf use MOM_io, only : field_exists, file_exists, MOM_read_data, write_version_number use MOM_io, only : slasher, fieldtype, vardesc, var_desc use MOM_io, only : write_field, close_file, SINGLE_FILE, MULTIPLE -use MOM_restart, only : register_restart_field, query_initialized, save_restart +use MOM_restart, only : register_restart_field, save_restart use MOM_restart, only : restart_init, restore_state, MOM_restart_CS, register_restart_pair use MOM_time_manager, only : time_type, time_type_to_real, real_to_time, operator(>), operator(-) use MOM_transcribe_grid, only : copy_dyngrid_to_MOM_grid, copy_MOM_grid_to_dyngrid diff --git a/src/ice_shelf/MOM_ice_shelf_dynamics.F90 b/src/ice_shelf/MOM_ice_shelf_dynamics.F90 index 4015c5d602..63ccc3d33c 100644 --- a/src/ice_shelf/MOM_ice_shelf_dynamics.F90 +++ b/src/ice_shelf/MOM_ice_shelf_dynamics.F90 @@ -16,8 +16,7 @@ module 
MOM_ice_shelf_dynamics use MOM_file_parser, only : read_param, get_param, log_param, log_version, param_file_type use MOM_grid, only : MOM_grid_init, ocean_grid_type use MOM_io, only : file_exists, slasher, MOM_read_data -use MOM_restart, only : register_restart_field, query_initialized -use MOM_restart, only : MOM_restart_CS +use MOM_restart, only : register_restart_field, MOM_restart_CS use MOM_time_manager, only : time_type, set_time use MOM_unit_scaling, only : unit_scale_type, unit_scaling_init !MJH use MOM_ice_shelf_initialize, only : initialize_ice_shelf_boundary diff --git a/src/tracer/MOM_CFC_cap.F90 b/src/tracer/MOM_CFC_cap.F90 index 0e78c351a8..44f83a475a 100644 --- a/src/tracer/MOM_CFC_cap.F90 +++ b/src/tracer/MOM_CFC_cap.F90 @@ -16,7 +16,7 @@ module MOM_CFC_cap use MOM_io, only : vardesc, var_desc, query_vardesc, stdout use MOM_tracer_registry, only : tracer_type use MOM_open_boundary, only : ocean_OBC_type -use MOM_restart, only : query_initialized, MOM_restart_CS +use MOM_restart, only : query_initialized, set_initialized, MOM_restart_CS use MOM_spatial_means, only : global_mass_int_EFP use MOM_time_manager, only : time_type use time_interp_external_mod, only : init_external_field, time_interp_external @@ -204,9 +204,11 @@ subroutine initialize_CFC_cap(restart, day, G, GV, US, h, diag, OBC, CS) do m=1,2 if (.not.restart .or. (CS%tracers_may_reinit .and. & - .not.query_initialized(CS%CFC_data(m)%conc, CS%CFC_data(m)%name, CS%restart_CSp))) & + .not.query_initialized(CS%CFC_data(m)%conc, CS%CFC_data(m)%name, CS%restart_CSp))) then call init_tracer_CFC(h, CS%CFC_data(m)%conc, CS%CFC_data(m)%name, CS%CFC_data(m)%land_val, & CS%CFC_data(m)%IC_val, G, GV, US, CS) + call set_initialized(CS%CFC_data(m)%conc, CS%CFC_data(m)%name, CS%restart_CSp) + endif ! cmor diagnostics ! CFC11 cmor conventions: http://clipc-services.ceda.ac.uk/dreq/u/42625c97b8fe75124a345962c4430982.html diff --git a/src/tracer/MOM_OCMIP2_CFC.F90 b/src/tracer/MOM_OCMIP2_CFC.F90 index f7038b46f7..8139d6e8c1 100644 --- a/src/tracer/MOM_OCMIP2_CFC.F90 +++ b/src/tracer/MOM_OCMIP2_CFC.F90 @@ -15,7 +15,7 @@ module MOM_OCMIP2_CFC use MOM_io, only : file_exists, MOM_read_data, slasher use MOM_io, only : vardesc, var_desc, query_vardesc use MOM_open_boundary, only : ocean_OBC_type -use MOM_restart, only : query_initialized, MOM_restart_CS +use MOM_restart, only : query_initialized, set_initialized, MOM_restart_CS use MOM_spatial_means, only : global_mass_int_EFP use MOM_sponge, only : set_up_sponge_field, sponge_CS use MOM_time_manager, only : time_type @@ -335,14 +335,18 @@ subroutine initialize_OCMIP2_CFC(restart, day, G, GV, US, h, diag, OBC, CS, & CS%diag => diag if (.not.restart .or. (CS%tracers_may_reinit .and. & - .not.query_initialized(CS%CFC11, CS%CFC11_name, CS%restart_CSp))) & + .not.query_initialized(CS%CFC11, CS%CFC11_name, CS%restart_CSp))) then call init_tracer_CFC(h, CS%CFC11, CS%CFC11_name, CS%CFC11_land_val, & CS%CFC11_IC_val, G, GV, US, CS) + call set_initialized(CS%CFC11, CS%CFC11_name, CS%restart_CSp) + endif if (.not.restart .or. (CS%tracers_may_reinit .and. & - .not.query_initialized(CS%CFC12, CS%CFC12_name, CS%restart_CSp))) & + .not.query_initialized(CS%CFC12, CS%CFC12_name, CS%restart_CSp))) then call init_tracer_CFC(h, CS%CFC12, CS%CFC12_name, CS%CFC12_land_val, & CS%CFC12_IC_val, G, GV, US, CS) + call set_initialized(CS%CFC12, CS%CFC12_name, CS%restart_CSp) + endif if (associated(OBC)) then ! Steal from updated DOME in the fullness of time. 
diff --git a/src/tracer/MOM_generic_tracer.F90 b/src/tracer/MOM_generic_tracer.F90 index e454a9a4bb..902b91fccc 100644 --- a/src/tracer/MOM_generic_tracer.F90 +++ b/src/tracer/MOM_generic_tracer.F90 @@ -39,7 +39,7 @@ module MOM_generic_tracer use MOM_hor_index, only : hor_index_type use MOM_io, only : file_exists, MOM_read_data, slasher use MOM_open_boundary, only : ocean_OBC_type - use MOM_restart, only : register_restart_field, query_initialized, MOM_restart_CS + use MOM_restart, only : register_restart_field, query_initialized, set_initialized, MOM_restart_CS use MOM_spatial_means, only : global_area_mean, global_mass_int_EFP use MOM_sponge, only : set_up_sponge_field, sponge_CS use MOM_time_manager, only : time_type, set_time @@ -345,6 +345,8 @@ subroutine initialize_MOM_generic_tracer(restart, day, G, GV, US, h, param_file, endif endif + + call set_initialized(tr_ptr, g_tracer_name, CS%restart_CSp) endif !traverse the linked list till hit NULL diff --git a/src/tracer/advection_test_tracer.F90 b/src/tracer/advection_test_tracer.F90 index 441189c0ac..a4e53ae797 100644 --- a/src/tracer/advection_test_tracer.F90 +++ b/src/tracer/advection_test_tracer.F90 @@ -13,7 +13,7 @@ module advection_test_tracer use MOM_hor_index, only : hor_index_type use MOM_io, only : slasher, vardesc, var_desc, query_vardesc use MOM_open_boundary, only : ocean_OBC_type -use MOM_restart, only : query_initialized, MOM_restart_CS +use MOM_restart, only : query_initialized, set_initialized, MOM_restart_CS use MOM_spatial_means, only : global_mass_int_EFP use MOM_sponge, only : set_up_sponge_field, sponge_CS use MOM_time_manager, only : time_type @@ -235,6 +235,8 @@ subroutine initialize_advection_test_tracer(restart, day, G, GV, h,diag, OBC, CS if (locx**2+locy**2<=1.0) CS%tr(i,j,k,m) = 1.0 if (locx>0.0.and.abs(locy)<0.2) CS%tr(i,j,k,m) = 0.0 enddo ; enddo + + call set_initialized(CS%tr(:,:,:,m), name, CS%restart_CSp) endif ! restart enddo diff --git a/src/tracer/boundary_impulse_tracer.F90 b/src/tracer/boundary_impulse_tracer.F90 index a4599a891e..3f8d8e7937 100644 --- a/src/tracer/boundary_impulse_tracer.F90 +++ b/src/tracer/boundary_impulse_tracer.F90 @@ -13,7 +13,7 @@ module boundary_impulse_tracer use MOM_hor_index, only : hor_index_type use MOM_io, only : vardesc, var_desc, query_vardesc use MOM_open_boundary, only : ocean_OBC_type -use MOM_restart, only : register_restart_field, query_initialized, MOM_restart_CS +use MOM_restart, only : register_restart_field, query_initialized, set_initialized, MOM_restart_CS use MOM_spatial_means, only : global_mass_int_EFP use MOM_sponge, only : set_up_sponge_field, sponge_CS use MOM_time_manager, only : time_type @@ -186,6 +186,7 @@ subroutine initialize_boundary_impulse_tracer(restart, day, G, GV, US, h, diag, do k=1,CS%nkml ; do j=jsd,jed ; do i=isd,ied CS%tr(i,j,k,m) = 1.0 enddo ; enddo ; enddo + call set_initialized(CS%tr(:,:,:,m), name, CS%restart_CSp) endif enddo ! 
Tracer loop diff --git a/src/tracer/ideal_age_example.F90 b/src/tracer/ideal_age_example.F90 index 2fdeaff02f..66c76f0e2c 100644 --- a/src/tracer/ideal_age_example.F90 +++ b/src/tracer/ideal_age_example.F90 @@ -13,7 +13,7 @@ module ideal_age_example use MOM_hor_index, only : hor_index_type use MOM_io, only : file_exists, MOM_read_data, slasher, vardesc, var_desc, query_vardesc use MOM_open_boundary, only : ocean_OBC_type -use MOM_restart, only : query_initialized, MOM_restart_CS +use MOM_restart, only : query_initialized, set_initialized, MOM_restart_CS use MOM_spatial_means, only : global_mass_int_EFP use MOM_sponge, only : set_up_sponge_field, sponge_CS use MOM_time_manager, only : time_type, time_type_to_real @@ -266,6 +266,7 @@ subroutine initialize_ideal_age_tracer(restart, day, G, GV, US, h, diag, OBC, CS enddo ; enddo ; enddo endif + call set_initialized(CS%tr(:,:,:,m), name, CS%restart_CSp) endif ! restart enddo ! Tracer loop diff --git a/src/tracer/nw2_tracers.F90 b/src/tracer/nw2_tracers.F90 index 2ecd2ba6e0..36885d8dc8 100644 --- a/src/tracer/nw2_tracers.F90 +++ b/src/tracer/nw2_tracers.F90 @@ -10,7 +10,7 @@ module nw2_tracers use MOM_grid, only : ocean_grid_type use MOM_hor_index, only : hor_index_type use MOM_io, only : file_exists, MOM_read_data, slasher, vardesc, var_desc -use MOM_restart, only : query_initialized, MOM_restart_CS +use MOM_restart, only : query_initialized, set_initialized, MOM_restart_CS use MOM_time_manager, only : time_type, time_type_to_real use MOM_tracer_registry, only : register_tracer, tracer_registry_type use MOM_tracer_diabatic, only : tracer_vertdiff, applyTracerBoundaryFluxesInOut @@ -162,10 +162,11 @@ subroutine initialize_nw2_tracers(restart, day, G, GV, US, h, tv, diag, CS) ! in which the tracers were not present write(var_name(1:8),'(a6,i2.2)') 'tracer',m if ((.not.restart) .or. & - (.not. query_initialized(CS%tr(:,:,:,m),var_name,CS%restart_CSp))) then + (.not. query_initialized(CS%tr(:,:,:,m), var_name, CS%restart_CSp))) then do k=1,GV%ke ; do j=G%jsc,G%jec ; do i=G%isc,G%iec CS%tr(i,j,k,m) = nw2_tracer_dist(m, G, GV, eta, i, j, k) enddo ; enddo ; enddo + call set_initialized(CS%tr(:,:,:,m), var_name, CS%restart_CSp) endif ! restart enddo ! Tracer loop diff --git a/src/tracer/oil_tracer.F90 b/src/tracer/oil_tracer.F90 index 5592b7627a..9b7b630237 100644 --- a/src/tracer/oil_tracer.F90 +++ b/src/tracer/oil_tracer.F90 @@ -14,7 +14,7 @@ module oil_tracer use MOM_io, only : file_exists, MOM_read_data, slasher use MOM_io, only : vardesc, var_desc, query_vardesc use MOM_open_boundary, only : ocean_OBC_type -use MOM_restart, only : query_initialized, MOM_restart_CS +use MOM_restart, only : query_initialized, set_initialized, MOM_restart_CS use MOM_spatial_means, only : global_mass_int_EFP use MOM_sponge, only : set_up_sponge_field, sponge_CS use MOM_time_manager, only : time_type, time_type_to_real @@ -278,7 +278,7 @@ subroutine initialize_oil_tracer(restart, day, G, GV, US, h, diag, OBC, CS, & endif enddo ; enddo ; enddo endif - + call set_initialized(CS%tr(:,:,:,m), name, CS%restart_CSp) endif ! restart enddo ! 
Tracer loop diff --git a/src/tracer/pseudo_salt_tracer.F90 b/src/tracer/pseudo_salt_tracer.F90 index 39320db405..9221d76f2c 100644 --- a/src/tracer/pseudo_salt_tracer.F90 +++ b/src/tracer/pseudo_salt_tracer.F90 @@ -15,7 +15,7 @@ module pseudo_salt_tracer use MOM_hor_index, only : hor_index_type use MOM_io, only : vardesc, var_desc, query_vardesc use MOM_open_boundary, only : ocean_OBC_type -use MOM_restart, only : query_initialized, MOM_restart_CS +use MOM_restart, only : query_initialized, set_initialized, MOM_restart_CS use MOM_spatial_means, only : global_mass_int_EFP use MOM_sponge, only : set_up_sponge_field, sponge_CS use MOM_time_manager, only : time_type @@ -148,6 +148,7 @@ subroutine initialize_pseudo_salt_tracer(restart, day, G, GV, US, h, diag, OBC, do k=1,nz ; do j=jsd,jed ; do i=isd,ied CS%ps(i,j,k) = US%S_to_ppt*tv%S(i,j,k) enddo ; enddo ; enddo + call set_initialized(CS%ps, name, CS%restart_CSp) endif if (associated(OBC)) then diff --git a/src/user/MOM_wave_interface.F90 b/src/user/MOM_wave_interface.F90 index e6734b2ac7..cd45f33bfd 100644 --- a/src/user/MOM_wave_interface.F90 +++ b/src/user/MOM_wave_interface.F90 @@ -20,7 +20,7 @@ module MOM_wave_interface use MOM_unit_scaling, only : unit_scale_type use MOM_variables, only : thermo_var_ptrs, surface use MOM_verticalgrid, only : verticalGrid_type -use MOM_restart, only : register_restart_field, MOM_restart_CS, query_initialized +use MOM_restart, only : register_restart_field, MOM_restart_CS implicit none ; private From c5984414e26d16f0f126eddf6f0693355c2fcb36 Mon Sep 17 00:00:00 2001 From: Kate Hedstrom Date: Wed, 27 Jul 2022 06:56:21 -0800 Subject: [PATCH 02/40] Seg offset (#171) * Add a variable to shorten some statements - suggested by @Hallberg-NOAA in the discussion for #164 * Fix to small i/j error --- src/core/MOM_open_boundary.F90 | 39 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/src/core/MOM_open_boundary.F90 b/src/core/MOM_open_boundary.F90 index 7f170f5510..63b9434269 100644 --- a/src/core/MOM_open_boundary.F90 +++ b/src/core/MOM_open_boundary.F90 @@ -3696,6 +3696,7 @@ subroutine update_OBC_segment_data(G, GV, US, OBC, tv, h, Time) real, dimension(:,:,:), allocatable, target :: tmp_buffer ! A buffer for input data [various units] real, dimension(:), allocatable :: h_stack ! Thicknesses at corner points [H ~> m or kg m-2] integer :: is_obc2, js_obc2 + integer :: i_seg_offset, j_seg_offset real :: net_H_src ! Total thickness of the incoming flow in the source field [H ~> m or kg m-2] real :: net_H_int ! Total thickness of the incoming flow in the model [H ~> m or kg m-2] real :: scl_fac ! A scaling factor to compensate for differences in total thicknesses [nondim] @@ -3737,6 +3738,8 @@ subroutine update_OBC_segment_data(G, GV, US, OBC, tv, h, Time) ie_obc = min(segment%ie_obc,ied) js_obc = max(segment%js_obc,jsd-1) je_obc = min(segment%je_obc,jed) + i_seg_offset = G%idg_offset - segment%HI%Isgb + j_seg_offset = G%jdg_offset - segment%HI%Jsgb ! Calculate auxiliary fields at staggered locations. ! Segment indices are on q points: @@ -3890,19 +3893,19 @@ subroutine update_OBC_segment_data(G, GV, US, OBC, tv, h, Time) if (segment%field(m)%name == 'V' .or. segment%field(m)%name == 'DVDX' .or. & segment%field(m)%name == 'Vamp' .or. 
segment%field(m)%name == 'Vphase') then segment%field(m)%buffer_src(is_obc,:,:) = & - tmp_buffer(1,2*(js_obc+G%jdg_offset-segment%HI%Jsgb)+1:2*(je_obc+G%jdg_offset-segment%HI%Jsgb)+1:2,:) + tmp_buffer(1,2*(js_obc+j_seg_offset)+1:2*(je_obc+j_seg_offset)+1:2,:) else segment%field(m)%buffer_src(is_obc,:,:) = & - tmp_buffer(1,2*(js_obc+G%jdg_offset-segment%HI%Jsgb)+1:2*(je_obc+G%jdg_offset-segment%HI%Jsgb):2,:) + tmp_buffer(1,2*(js_obc+j_seg_offset)+1:2*(je_obc+j_seg_offset):2,:) endif else if (segment%field(m)%name == 'U' .or. segment%field(m)%name == 'DUDY' .or. & segment%field(m)%name == 'Uamp' .or. segment%field(m)%name == 'Uphase') then segment%field(m)%buffer_src(:,js_obc,:) = & - tmp_buffer(2*(is_obc+G%idg_offset-segment%HI%Isgb)+1:2*(ie_obc+G%idg_offset-segment%HI%Isgb)+1:2,1,:) + tmp_buffer(2*(is_obc+i_seg_offset)+1:2*(ie_obc+i_seg_offset)+1:2,1,:) else segment%field(m)%buffer_src(:,js_obc,:) = & - tmp_buffer(2*(is_obc+G%idg_offset-segment%HI%Isgb)+1:2*(ie_obc+G%idg_offset-segment%HI%Isgb):2,1,:) + tmp_buffer(2*(is_obc+i_seg_offset)+1:2*(ie_obc+i_seg_offset):2,1,:) endif endif else @@ -3910,19 +3913,19 @@ subroutine update_OBC_segment_data(G, GV, US, OBC, tv, h, Time) if (segment%field(m)%name == 'V' .or. segment%field(m)%name == 'DVDX' .or. & segment%field(m)%name == 'Vamp' .or. segment%field(m)%name == 'Vphase') then segment%field(m)%buffer_src(is_obc,:,:) = & - tmp_buffer(1,js_obc+G%jdg_offset-segment%HI%Jsgb+1:je_obc+G%jdg_offset-segment%HI%Jsgb+1,:) + tmp_buffer(1,js_obc+j_seg_offset+1:je_obc+j_seg_offset+1,:) else segment%field(m)%buffer_src(is_obc,:,:) = & - tmp_buffer(1,js_obc+G%jdg_offset-segment%HI%Jsgb+1:je_obc+G%jdg_offset-segment%HI%Jsgb,:) + tmp_buffer(1,js_obc+j_seg_offset+1:je_obc+j_seg_offset,:) endif else if (segment%field(m)%name == 'U' .or. segment%field(m)%name == 'DUDY' .or. & segment%field(m)%name == 'Uamp' .or. segment%field(m)%name == 'Uphase') then segment%field(m)%buffer_src(:,js_obc,:) = & - tmp_buffer(is_obc+G%idg_offset-segment%HI%Isgb+1:ie_obc+G%idg_offset-segment%HI%Isgb+1,1,:) + tmp_buffer(is_obc+i_seg_offset+1:ie_obc+i_seg_offset+1,1,:) else segment%field(m)%buffer_src(:,js_obc,:) = & - tmp_buffer(is_obc+G%idg_offset-segment%HI%Isgb+1:ie_obc+G%idg_offset-segment%HI%Isgb,1,:) + tmp_buffer(is_obc+i_seg_offset+1:ie_obc+i_seg_offset,1,:) endif endif endif @@ -3949,40 +3952,36 @@ subroutine update_OBC_segment_data(G, GV, US, OBC, tv, h, Time) if (segment%is_E_or_W) then if (segment%field(m)%name == 'V' .or. segment%field(m)%name == 'DVDX') then segment%field(m)%dz_src(is_obc,:,:) = & - tmp_buffer(1,2*(js_obc+G%jdg_offset-segment%HI%Jsgb)+1:2*(je_obc+G%jdg_offset- & - segment%HI%Jsgb)+1:2,:) + tmp_buffer(1,2*(js_obc+j_seg_offset)+1:2*(je_obc+j_seg_offset)+1:2,:) else segment%field(m)%dz_src(is_obc,:,:) = & - tmp_buffer(1,2*(js_obc+G%jdg_offset-segment%HI%Jsgb)+1:2*(je_obc+G%jdg_offset- & - segment%HI%Jsgb):2,:) + tmp_buffer(1,2*(js_obc+j_seg_offset)+1:2*(je_obc+j_seg_offset):2,:) endif else if (segment%field(m)%name == 'U' .or. 
segment%field(m)%name == 'DUDY') then segment%field(m)%dz_src(:,js_obc,:) = & - tmp_buffer(2*(is_obc+G%idg_offset-segment%HI%Isgb)+1:2*(ie_obc+G%idg_offset- & - segment%HI%Isgb)+1:2,1,:) + tmp_buffer(2*(is_obc+i_seg_offset)+1:2*(ie_obc+i_seg_offset)+1:2,1,:) else segment%field(m)%dz_src(:,js_obc,:) = & - tmp_buffer(2*(is_obc+G%idg_offset-segment%HI%Isgb)+1:2*(ie_obc+G%idg_offset- & - segment%HI%Isgb):2,1,:) + tmp_buffer(2*(is_obc+i_seg_offset)+1:2*(ie_obc+i_seg_offset):2,1,:) endif endif else if (segment%is_E_or_W) then if (segment%field(m)%name == 'V' .or. segment%field(m)%name == 'DVDX') then segment%field(m)%dz_src(is_obc,:,:) = & - tmp_buffer(1,js_obc+G%jdg_offset-segment%HI%Jsgb+1:je_obc+G%jdg_offset-segment%HI%Jsgb+1,:) + tmp_buffer(1,js_obc+j_seg_offset+1:je_obc+j_seg_offset+1,:) else segment%field(m)%dz_src(is_obc,:,:) = & - tmp_buffer(1,js_obc+G%jdg_offset-segment%HI%Jsgb+1:je_obc+G%jdg_offset-segment%HI%Jsgb,:) + tmp_buffer(1,js_obc+j_seg_offset+1:je_obc+j_seg_offset,:) endif else if (segment%field(m)%name == 'U' .or. segment%field(m)%name == 'DUDY') then segment%field(m)%dz_src(:,js_obc,:) = & - tmp_buffer(is_obc+G%idg_offset-segment%HI%Isgb+1:ie_obc+G%idg_offset-segment%HI%Isgb+1,1,:) + tmp_buffer(is_obc+i_seg_offset+1:ie_obc+i_seg_offset+1,1,:) else segment%field(m)%dz_src(:,js_obc,:) = & - tmp_buffer(is_obc+G%idg_offset-segment%HI%Isgb+1:ie_obc+G%idg_offset-segment%HI%Isgb,1,:) + tmp_buffer(is_obc+i_seg_offset+1:ie_obc+i_seg_offset,1,:) endif endif endif From c9cda0d0640291e576da3803860228202d56e285 Mon Sep 17 00:00:00 2001 From: Alistair Adcroft Date: Fri, 15 Jul 2022 16:23:53 -0400 Subject: [PATCH 03/40] Fixes minor issues with ac/make process Where we had placed the dependency discovery (makedep) was leading to some small annoyances: - The dependency generation was being invoked twice - The dependency generation was not invoked when source code was changed. Now: - `./configure` creates config.status Makefile and Makefile.dep - `./config.status` recreates Makefile and Makefile.dep - `make depend` recreates Makefile.dep - `make` will conditionally recreate Makefile.dep Changes: - Add dependency of Makefile.dep on source code in Makefile.in. This allows `make` to know when to run `makedep`. - Fix syntax issue for AC_CONFIG_COMMANDS which caused errors to be issued without stopping. Side note: the system works perfectly well if we comment out the "AC_CONFIG_COMMANDS" line in configure.ac to invoke makedep. 
This is because `make` now knows how --- ac/Makefile.in | 6 +++++- ac/configure.ac | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ac/Makefile.in b/ac/Makefile.in index 599381a35b..930816bc8c 100644 --- a/ac/Makefile.in +++ b/ac/Makefile.in @@ -24,10 +24,14 @@ Makefile: @srcdir@/ac/Makefile.in config.status ./config.status +# Recursive wildcard (finds all files in $1 with suffixes in $2) +rwildcard=$(foreach d,$(wildcard $(1:=/*)),$(call rwildcard,$d,$2) $(filter $(subst *,%,$2),$d)) + + # Generate dependencies .PHONY: depend depend: Makefile.dep -Makefile.dep: +Makefile.dep: $(MAKEDEP) $(call rwildcard,$(SRC_DIRS),*.h *.c *.inc *.F90) $(MAKEDEP) -o Makefile.dep -e $(SRC_DIRS) diff --git a/ac/configure.ac b/ac/configure.ac index bf1cf11776..dc4962307e 100644 --- a/ac/configure.ac +++ b/ac/configure.ac @@ -231,7 +231,7 @@ AC_SUBST([MAKEDEP]) AC_SUBST([SRC_DIRS], ["${srcdir}/src ${MODEL_FRAMEWORK} ${srcdir}/config_src/external ${DRIVER_DIR} ${MEM_LAYOUT}"] ) -AC_CONFIG_COMMANDS([Makefile.dep], [make depend]) +AC_CONFIG_COMMANDS(Makefile.dep, [make depend]) # setjmp verification From 9928d9586815fb1e37cca50038f510a23e12e1c1 Mon Sep 17 00:00:00 2001 From: Marshall Ward Date: Tue, 26 Jul 2022 10:33:05 -0400 Subject: [PATCH 04/40] Refactoring of the .testing Makefile Primarily changes to improve coverage reporting, but some other changes as well: * New separate rules for `codecov` and `report.cov` * REPORT_COVERAGE was split into two flags: * DO_COVERAGE: Enable code coverage reporting * REQUIRE_COVERAGE_UPLOAD: If true, error if upload fails * We now report codecov upload failures in the log, but only raise an error (i.e. nonzero return code) if REQUIRE_COVERAGE_UPLOAD is true. * GitHub Actions now only reports failed uploads to CodeCov for pull requests. * Separate FCFLAGS_FMS flag to control FCFLAGS for FMS builds - also renamed the old FCFLAGS_FMS to FCFLAGS_DEPS, which was previously used to pass the flags to the FMS library/modfiles * $(DEPS) was dropped, and we just use `deps` now. * Updated the header instructions * README update * Codecov.io and GitHub Actions CIs were updated to support changes --- .codecov.yml | 4 - .github/workflows/coverage.yml | 30 ++- .testing/Makefile | 214 ++++++++++--------- .testing/README.md | 277 ------------------------ .testing/README.rst | 371 +++++++++++++++++++++++++++++++++ 5 files changed, 508 insertions(+), 388 deletions(-) delete mode 100644 .testing/README.md create mode 100644 .testing/README.rst diff --git a/.codecov.yml b/.codecov.yml index aa85b2b3ac..ae3b27aed3 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -8,7 +8,3 @@ coverage: default: threshold: 100% base: parent -comment: - # This is set to the number of TCs, plus unit, but can be removed - # (i.e. set to 1) when reporting is separated from coverage. 
- after_n_builds: 9 diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 1fc95e9127..e285b18c72 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -3,15 +3,15 @@ name: Code coverage on: [push, pull_request] jobs: - build-test-nans: + build-coverage: runs-on: ubuntu-latest defaults: run: working-directory: .testing - env: - REPORT_COVERAGE: true + #env: + # REQUIRE_COVERAGE_UPLOAD: true steps: - uses: actions/checkout@v2 @@ -26,10 +26,26 @@ jobs: run: make -j build/unit/MOM6 - name: Run unit tests - run: make unit.cov.upload + run: make run.cov.unit - - name: Compile MOM6 with code coverage + - name: Report unit test coverage to CI (PR) + if: github.event_name == 'pull_request' + run: make report.cov.unit REQUIRE_COVERAGE_UPLOAD=true + + - name: Report unit test coverage to CI (Push) + if: github.event_name != 'pull_request' + run: make report.cov.unit + + - name: Compile ocean-only MOM6 with code coverage run: make -j build/cov/MOM6 - - name: Run and post coverage - run: make run.cov -k -s + - name: Run coverage tests + run: make -j -k run.cov + + - name: Report coverage to CI (PR) + if: github.event_name == 'pull_request' + run: make report.cov REQUIRE_COVERAGE_UPLOAD=true + + - name: Report coverage to CI (Push) + if: github.event_name != 'pull_request' + run: make report.cov diff --git a/.testing/Makefile b/.testing/Makefile index 972c213032..bcfbc323d2 100644 --- a/.testing/Makefile +++ b/.testing/Makefile @@ -8,27 +8,24 @@ # Run the test suite, defined in the `tc` directores. # # make clean -# Wipe the MOM6 test executables -# (NOTE: This does not delete FMS in the `deps`) +# Delete the MOM6 test executables and dependency builds (FMS) +# +# make clean.build +# Delete only the MOM6 test executables # # # Configuration: # These settings can be provided as either command-line flags, or saved in a # `config.mk` file. # -# Experiment Configuration: -# BUILDS Executables to be built by `make` or `make all` -# CONFIGS Model configurations to test (default: `tc*`) -# TESTS Tests to run -# DIMS Dimensional scaling tests -# (NOTE: Each test will build its required executables, regardless of BUILDS) -# # General test configuration: -# FRAMEWORK Model framework (fms1 or fms2) # MPIRUN MPI job launcher (mpirun, srun, etc) +# FRAMEWORK Model framework (fms1 or fms2) # DO_REPRO_TESTS Enable production ("repro") testing equivalence # DO_REGRESSION_TESTS Enable regression tests (usually dev/gfdl) -# REPORT_COVERAGE Enable code coverage and generate reports +# DO_COVERAGE Enable code coverage and generate .gcov reports +# DO_PROFILE Enable performance profiler comparison tests +# REQUIRE_CODECOV_UPLOAD Abort as error if upload to codecov.io fails. 
# # Compiler configuration: # CC C compiler @@ -43,6 +40,16 @@ # FCFLAGS_OPT Aggressive optimization compiler flags # FCFLAGS_INIT Variable initialization flags # FCFLAGS_COVERAGE Code coverage flags +# FCFLAGS_FMS FMS build flags (default: FCFLAGS_DEBUG) +# +# LDFLAGS_COVERAGE Linker coverage flags +# LDFLAGS_USER User-defined linker flags (used for all MOM/FMS builds) +# +# Experiment Configuration: +# BUILDS Executables to be built by `make` or `make all` +# CONFIGS Model configurations to test (default: `tc*`) +# TESTS Tests to run +# DIMS Dimensional scaling tests # # Regression repository ("target") configuration: # MOM_TARGET_SLUG URL slug (minus domain) of the target repo @@ -78,32 +85,37 @@ export FC export MPIFC # Builds are distinguished by FCFLAGS -# NOTE: FMS will be built using FCFLAGS_DEBUG FCFLAGS_DEBUG ?= -g -O0 FCFLAGS_REPRO ?= -g -O2 FCFLAGS_OPT ?= -g -O3 -mavx -fno-omit-frame-pointer -FCFLAGS_COVERAGE ?= -g -O0 -fbacktrace --coverage FCFLAGS_INIT ?= +FCFLAGS_COVERAGE ?= -g -O0 -fbacktrace --coverage +FCFLAGS_FMS ?= $(FCFLAGS_DEBUG) # Additional notes: # - These default values are simple, minimalist flags, supported by nearly all -# compilers, and are comparable to GFDL's canonical DEBUG and REPRO builds. +# compilers, and are somewhat analogous to GFDL's DEBUG and REPRO builds. # -# - These flags should be configured outside of the Makefile, either with +# - These flags can be configured outside of the Makefile, either with # config.mk or as environment variables. -# -# - FMS cannot be built with the same aggressive initialization flags as MOM6, -# so FCFLAGS_INIT is used to provide additional MOM6 configuration. -# User-defined LDFLAGS (applied to all builds and FMS) LDFLAGS_COVERAGE ?= --coverage LDFLAGS_USER ?= # Set to `true` to require identical results from DEBUG and REPRO builds -# NOTE: Many compilers (Intel, GCC on ARM64) do not yet produce identical -# results across DEBUG and REPRO builds (as defined below), so we disable on +# NOTE: Many compilers (Intel, GCC on ARM64) do not produce identical results +# across DEBUG and REPRO builds (as defined below), so we disable on # default. 
DO_REPRO_TESTS ?= +# Enable profiling +DO_PROFILE ?= + +# Enable code coverage runs +DO_COVERAGE ?= + +# Report failure if coverage report is not uploaded +REQUIRE_COVERAGE_UPLOAD ?= + # Time measurement (configurable by the CI) TIME ?= time @@ -115,11 +127,6 @@ TESTS ?= grid layout rotate restart openmp nan $(foreach d,$(DIMS),dim.$(d)) DIMS ?= t l h z q r -#--- -# Dependencies -DEPS = deps - - #--- # Test configuration @@ -132,21 +139,18 @@ ifeq ($(DO_REPRO_TESTS), true) endif # Profiling -ifeq ($(DO_PROFILE), false) +ifeq ($(DO_PROFILE), true) BUILDS += opt opt_target endif -# Unit test testing -BUILDS += cov unit +# Unit testing +ifeq ($(DO_COVERAGE), true) + BUILDS += cov unit +endif +CODECOV_UPLOADER_URL ?= https://uploader.codecov.io/latest/linux/codecov + -# The following variables are configured by Travis: -# DO_REGRESSION_TESTS: true if $(TRAVIS_PULL_REQUEST) is a PR number -# MOM_TARGET_SLUG: TRAVIS_REPO_SLUG -# MOM_TARGET_LOCAL_BRANCH: TRAVIS_BRANCH -# These are set to true by our Travis configuration if testing a pull request DO_REGRESSION_TESTS ?= -REPORT_COVERAGE ?= -CODECOV_UPLOADER_URL ?= https://uploader.codecov.io/latest/linux/codecov ifeq ($(DO_REGRESSION_TESTS), true) BUILDS += target @@ -180,7 +184,7 @@ TARGET_SOURCE = $(call SOURCE,build/target_codebase/src) \ $(wildcard build/target_codebase/config_src/infra/FMS1/*.F90) \ $(wildcard build/target_codebase/config_src/drivers/solo_driver/*.F90) \ $(wildcard build/target_codebase/config_src/ext*/*.F90) -FMS_SOURCE = $(call SOURCE,$(DEPS)/fms/src) +FMS_SOURCE = $(call SOURCE,deps/fms/src) #--- @@ -224,22 +228,22 @@ BUILD_TARGETS = MOM6 Makefile path_names # .testing dependencies # TODO: We should probably build TARGET with the FMS that it was configured # to use. But for now we use the same FMS over all builds. 
-FCFLAGS_FMS = -I../../$(DEPS)/include -LDFLAGS_FMS = -L../../$(DEPS)/lib -PATH_FMS = PATH="${PATH}:../../$(DEPS)/bin" +FCFLAGS_DEPS = -I../../deps/include +LDFLAGS_DEPS = -L../../deps/lib +PATH_DEPS = PATH="${PATH}:../../deps/bin" # Define the build targets in terms of the traditional DEBUG/REPRO/etc labels -SYMMETRIC_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_FMS)" -ASYMMETRIC_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_FMS)" -REPRO_FCFLAGS := FCFLAGS="$(FCFLAGS_REPRO) $(FCFLAGS_FMS)" -OPT_FCFLAGS := FCFLAGS="$(FCFLAGS_OPT) $(FCFLAGS_FMS)" -OPENMP_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_FMS)" -TARGET_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_FMS)" -COV_FCFLAGS := FCFLAGS="$(FCFLAGS_COVERAGE) $(FCFLAGS_FMS)" +SYMMETRIC_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_DEPS)" +ASYMMETRIC_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_DEPS)" +REPRO_FCFLAGS := FCFLAGS="$(FCFLAGS_REPRO) $(FCFLAGS_DEPS)" +OPT_FCFLAGS := FCFLAGS="$(FCFLAGS_OPT) $(FCFLAGS_DEPS)" +OPENMP_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_DEPS)" +TARGET_FCFLAGS := FCFLAGS="$(FCFLAGS_DEBUG) $(FCFLAGS_INIT) $(FCFLAGS_DEPS)" +COV_FCFLAGS := FCFLAGS="$(FCFLAGS_COVERAGE) $(FCFLAGS_DEPS)" -MOM_LDFLAGS := LDFLAGS="$(LDFLAGS_FMS) $(LDFLAGS_USER)" -COV_LDFLAGS := LDFLAGS="$(LDFLAGS_COVERAGE) $(LDFLAGS_FMS) $(LDFLAGS_USER)" +MOM_LDFLAGS := LDFLAGS="$(LDFLAGS_DEPS) $(LDFLAGS_USER)" +COV_LDFLAGS := LDFLAGS="$(LDFLAGS_COVERAGE) $(LDFLAGS_DEPS) $(LDFLAGS_USER)" # Environment variable configuration @@ -291,7 +295,7 @@ build/%/MOM6: build/%/Makefile # Use autoconf to construct the Makefile for each target .PRECIOUS: $(foreach b,$(BUILDS),build/$(b)/Makefile) -build/%/Makefile: ../ac/configure ../ac/Makefile.in $(DEPS)/lib/libFMS.a +build/%/Makefile: ../ac/configure ../ac/Makefile.in deps/lib/libFMS.a mkdir -p $(@D) cd $(@D) \ && $(MOM_ENV) ../../../ac/configure $(MOM_ACFLAGS) --with-framework=$(FRAMEWORK) \ @@ -304,7 +308,7 @@ build/%/Makefile: ../ac/configure ../ac/Makefile.in $(DEPS)/lib/libFMS.a # Fetch the regression target codebase build/target/Makefile build/opt_target/Makefile: \ - $(TARGET_CODEBASE)/ac/configure $(DEPS)/lib/libFMS.a + $(TARGET_CODEBASE)/ac/configure deps/lib/libFMS.a mkdir -p $(@D) cd $(@D) \ && $(MOM_ENV) ../../$(TARGET_CODEBASE)/ac/configure $(MOM_ACFLAGS) \ @@ -324,32 +328,31 @@ $(TARGET_CODEBASE): # FMS # Set up the FMS build environment variables -FMS_ENV = PATH="${PATH}:$(realpath ../ac)" FCFLAGS="$(FCFLAGS_DEBUG)" +FMS_ENV = PATH="${PATH}:$(realpath ../ac)" FCFLAGS="$(FCFLAGS_FMS)" -# TODO: *.mod dependencies? -$(DEPS)/lib/libFMS.a: $(DEPS)/fms/build/libFMS.a - $(MAKE) -C $(DEPS) lib/libFMS.a +deps/lib/libFMS.a: deps/fms/build/libFMS.a + $(MAKE) -C deps lib/libFMS.a -$(DEPS)/fms/build/libFMS.a: $(DEPS)/fms/build/Makefile - $(MAKE) -C $(DEPS) fms/build/libFMS.a +deps/fms/build/libFMS.a: deps/fms/build/Makefile + $(MAKE) -C deps fms/build/libFMS.a -$(DEPS)/fms/build/Makefile: $(DEPS)/fms/src/configure $(DEPS)/Makefile.fms.in - $(FMS_ENV) $(MAKE) -C $(DEPS) fms/build/Makefile +deps/fms/build/Makefile: deps/fms/src/configure deps/Makefile.fms.in + $(FMS_ENV) $(MAKE) -C deps fms/build/Makefile -$(DEPS)/Makefile.fms.in: ../ac/deps/Makefile.fms.in $(DEPS)/Makefile - cp $< $(DEPS) +deps/Makefile.fms.in: ../ac/deps/Makefile.fms.in deps/Makefile + cp $< deps # TODO: m4 dependencies? 
-$(DEPS)/fms/src/configure: ../ac/deps/configure.fms.ac $(DEPS)/Makefile $(FMS_SOURCE) | $(DEPS)/fms/src - cp ../ac/deps/configure.fms.ac $(DEPS) - cp -r ../ac/deps/m4 $(DEPS) - $(MAKE) -C $(DEPS) fms/src/configure +deps/fms/src/configure: ../ac/deps/configure.fms.ac deps/Makefile $(FMS_SOURCE) | deps/fms/src + cp ../ac/deps/configure.fms.ac deps + cp -r ../ac/deps/m4 deps + $(MAKE) -C deps fms/src/configure -$(DEPS)/fms/src: $(DEPS)/Makefile - make -C $(DEPS) fms/src +deps/fms/src: deps/Makefile + make -C deps fms/src # Dependency init -$(DEPS)/Makefile: ../ac/deps/Makefile +deps/Makefile: ../ac/deps/Makefile mkdir -p $(@D) cp $< $@ @@ -362,15 +365,18 @@ $(DEPS)/Makefile: ../ac/deps/Makefile # TODO: # - Avoid re-building FMS and MOM6 src by re-using existing object/mod files # - Use autoconf rather than mkmf templates -MK_TEMPLATE ?= ../../$(DEPS)/mkmf/templates/ncrc-gnu.mk +MK_TEMPLATE ?= ../../deps/mkmf/templates/ncrc-gnu.mk + # NUOPC driver build/nuopc/mom_ocean_model_nuopc.o: build/nuopc/Makefile cd $(@D) && make $(@F) check_mom6_api_nuopc: build/nuopc/mom_ocean_model_nuopc.o + # GFDL coupled driver build/coupled/ocean_model_MOM.o: build/coupled/Makefile cd $(@D) && make $(@F) check_mom6_api_coupled: build/coupled/ocean_model_MOM.o + # MCT driver build/mct/mom_ocean_model_mct.o: build/mct/Makefile cd $(@D) && make $(@F) @@ -442,8 +448,8 @@ $(eval $(call CONFIG_RULE,tc3,grid)) # Color highlights for test results RED = \033[0;31m -YELLOW = \033[0;33m GREEN = \033[0;32m +YELLOW = \033[0;33m MAGENTA = \033[0;35m RESET = \033[0m @@ -544,7 +550,6 @@ $(foreach c,$(CONFIGS),$(eval $(call CONFIG_DIM_RULE,$(c)))) define STAT_RULE work/%/$(1)/ocean.stats work/%/$(1)/chksum_diag: build/$(2)/MOM6 $(VENV_PATH) @echo "Running test $$*.$(1)..." - if [ $(3) ]; then find build/$(2) -name *.gcda -exec rm -f '{}' \; ; fi mkdir -p $$(@D) cp -RL $$*/* $$(@D) if [ -f $$(@D)/Makefile ]; then \ @@ -572,19 +577,30 @@ work/%/$(1)/ocean.stats work/%/$(1)/chksum_diag: build/$(2)/MOM6 $(VENV_PATH) cd build/$(2) ; \ gcov -b *.gcda > gcov.$$*.$(1).out ; \ curl -s $(CODECOV_UPLOADER_URL) -o codecov ; \ - chmod +x codecov ; \ - ./codecov -R . -Z -f "*.gcov" -n $$@ \ - > codecov.$$*.$(1).out \ - 2> codecov.$$*.$(1).err \ - && echo -e "${MAGENTA}Report uploaded to codecov.${RESET}"; \ fi endef +# Upload coverage reports +codecov: + curl -s $(CODECOV_UPLOADER_URL) -o $@ + chmod +x codecov + +.PHONY: report.cov +report.cov: run.cov codecov + ./codecov -R build/cov -Z -f "*.gcov" \ + > build/cov/codecov.out \ + 2> build/cov/codecov.err \ + && echo -e "${MAGENTA}Report uploaded to codecov.${RESET}" \ + || { \ + echo -e "${RED}Failed to upload report.${RESET}" ; \ + if [ "$(REQUIRE_COVERAGE_UPLOAD)" = true ] ; then false ; fi ; \ + } + # Define $(,) as comma escape character , := , -$(eval $(call STAT_RULE,symmetric,symmetric,$(REPORT_COVERAGE),,,1)) +$(eval $(call STAT_RULE,symmetric,symmetric,,,,1)) $(eval $(call STAT_RULE,asymmetric,asymmetric,,,,1)) $(eval $(call STAT_RULE,target,target,,,,1)) $(eval $(call STAT_RULE,repro,repro,,,,1)) @@ -599,7 +615,7 @@ $(eval $(call STAT_RULE,dim.z,symmetric,,Z_RESCALE_POWER=11,,1)) $(eval $(call STAT_RULE,dim.q,symmetric,,Q_RESCALE_POWER=11,,1)) $(eval $(call STAT_RULE,dim.r,symmetric,,R_RESCALE_POWER=11,,1)) -$(eval $(call STAT_RULE,cov,cov,$(REPORT_COVERAGE),,,1)) +$(eval $(call STAT_RULE,cov,cov,true,,,1)) # Generate the half-period input namelist as follows: # 1. 
Fetch DAYMAX and TIMEUNIT from MOM_input @@ -678,8 +694,9 @@ test.summary: #--- # unit test -.PHONY: unit.cov -unit.cov: build/unit/MOM_new_unit_tests.gcov +# NOTE: Using file parser gcov report as a proxy for test completion +.PHONY: run.cov.unit +run.cov.unit: build/unit/MOM_file_parser_tests.F90.gcov work/unit/std.out: build/unit/MOM_unit_tests if [ $(REPORT_COVERAGE) ]; then \ @@ -700,31 +717,27 @@ work/unit/std.out: build/unit/MOM_unit_tests cat p2.std.err | tail -n 100 ; \ ) -build/unit/codecov: - mkdir -p $(@D) - cd $(@D) \ - && curl -s $(CODECOV_UPLOADER_URL) -o $(@F) - chmod +x $@ - # Use driver coverage file as a proxy for the run -# TODO: Replace work/unit/std.out with *.gcda? -build/unit/MOM_new_unit_tests.gcov: work/unit/std.out - mkdir -p $(@D) +# NOTE: .gcov actually depends on .gcda, but .gcda is produced with std.out +build/unit/MOM_file_parser_tests.F90.gcov: work/unit/std.out cd $(@D) \ && gcov -b *.gcda > gcov.unit.out -# Use driver coverage file as a proxy for the run -.PHONY: unit.cov.upload -unit.cov.upload: build/unit/MOM_new_unit_tests.gcov build/unit/codecov - cd build/unit \ - && ./codecov -R . -Z -f "*.gcov" -n "Unit tests" \ - > codecov.unit.out \ - 2> codecov.unit.err \ - && echo -e "${MAGENTA}Report uploaded to codecov.${RESET}" +.PHONY: report.cov.unit +report.cov.unit: build/unit/MOM_file_parser_tests.F90.gcov codecov + ./codecov -R build/unit -f "*.gcov" -Z -n "Unit tests" \ + > build/unit/codecov.out \ + 2> build/unit/codecov.err \ + && echo -e "${MAGENTA}Report uploaded to codecov.${RESET}" \ + || { \ + echo -e "${RED}Failed to upload report.${RESET}" ; \ + if [ $(REQUIRE_COVERAGE_UPLOAD) = "true" ] ; then false ; fi ; \ + } + #--- -# Profiling -# XXX: This is experimental work to track, log, and report changes in runtime +# Profiling based on FMS clocks + PCONFIGS = p0 .PHONY: profile @@ -748,8 +761,9 @@ work/p0/%/std.out: cd $(@D) \ && $(MPIRUN) -n 1 ../../../$< 2> std.err > std.out + #--- -# Same but with perf +# Profiling based on perf output # TODO: This expects the -e flag, can I handle it in the command? PERF_EVENTS ?= diff --git a/.testing/README.md b/.testing/README.md deleted file mode 100644 index ef02bcfa09..0000000000 --- a/.testing/README.md +++ /dev/null @@ -1,277 +0,0 @@ -# .testing - -This directory contains the Makefile and test configurations used to evaluate -submissions to the MOM6 codebase. The tests are designed to run either locally -or in a CI environment such as Travis. - - -## Overview - -This section gives a very brief overview of the test suite and how to use it. - -To build and run the model tests: -``` -make -j -make -j test -``` -For new users, the default configuration should be suitable for most platforms. -If not, then the following options may need to be configured. - -`MPIRUN` (*default:* `mpirun`) - - Name of the MPI launcher. Often this is `mpirun` or `mpiexec` but may all - need to run through a scheduler, e.g. `srun` if using Slurm. - -`DO_REGRESSION_TESTS` (*default: none*) - - Set to `true` to compare output with `dev/gfdl`. - -`DO_REPRO_TESTS` (*default: none*) - - Set to `true` to compare DEBUG and REPRO builds, which typically correspond - to unoptimized and optimized builds. See TODO for more information. - -These settings can either be specified at the command line, as shown below -``` -make DO_REGRESSION_TESTS=true -make test DO_REGRESSION_TESTS=true -``` -or saved in a configuration file, `config.mk`. 
- -To run individual classes of tests, use the subclass name: -``` -make test.grids -make test.layouts -make DO_REGRESSION_TESTS=true test.regressions -``` -To test an individual test configuration (TC): -``` -make tc0.grid -``` -See "Tests" and "Test Configurations" for the complete list of tests. - -The rest of the document describes the test suite in more detail, including -names and descriptions of the test classes and configurations. - - -## Testing overview - -The test suite checks for numerical consistency of the model output across -different model configurations when subjected to relevant numerical and -mathematical transformations, such as grid layout or dimensional rescaling. If -the model state is unchanged after each transformation, then the test is -reported as passing. Any discrepancy in the model state causes the test to -fail. - -Model state is currently defined by the `ocean.stats` output file, which -reports the total energy (per unit mass) at machine precision alongside similar -global metrics at lower precision, such as mass or mean sea level. - -Diagnostics are based on the MOM checksum function, which includes the mean, -minimum, and maximum values, alongside a bitcount checksum, in the physical -domain, which are saved in the `chksum_diag` output file. - - -## Build configuration - -The test suite defines a DEBUG and a REPRO build, which resemble targets used -at GFDL. The DEBUG build is intended for detecting potential errors and -troubleshooting, while the REPRO build has typically been optimized for -production runs. - -Ideally, the DEBUG and REPRO runs will produce identical results, although this -is often not the case for many compilers and platforms. The `DO_REPRO_TEST` -flag is used to test DEBUG/REPRO equivalency. - -The following options are provided to configure your compiler flags. - -`FCFLAGS_DEBUG` (*default:* `-g -O0`) - - Specify the flags used in the DEBUG build. These are the flags used for all - tests excepting the REPRO builds. They are also used to build the FMS - library. - - These should be used to enable settings favorable to debugging, such as no - optimizations, backtraces, range checking, and warnings. - - For more aggressive debugging flags which cannot be used with FMS, see - `FCFLAGS_INIT`. - -`FCFLAGS_REPRO:` (*default:* `-g -O2`) - - Specify the optimized reproducible run, typically used in production runs. - - Ideally, this should consist of optimization flags which improve peformance - but do not change model output. In practice, this is difficult to achieve, - and should only used in certain environments. - -`FCFLAGS_INIT` (*default: none*) - - This flag was historically used to specify variable initialization, such as - nonzero integers or floating point values, and is still generally used for - this purpose. - - As implemented, it is used for all MOM6 builds. It is not used for FMS - builds, so can also act as a debugging flag independent of FMS. - -`FCFLAGS_COVERAGE` (*default: none*) - - This flag is used to define a build which supports some sort of code - coverage, often one which is handled by the CI. - - For many compilers, this is set to `--coverage`, and is applied to both the - compiler (`FCFLAGS`) and linker (`LDFLAGS`). - -Example values used by GFDL and Travis for the GFortran compiler are shown -below. 
-``` -FCFLAGS_DEBUG="-g -O0 -Wextra -Wno-compare-reals -fbacktrace -ffpe-trap=invalid,zero,overflow -fcheck=bounds" -FCFLAGS_REPRO="-g -O2 -fbacktrace" -FCFLAGS_INIT="-finit-real=snan -finit-integer=2147483647 -finit-derived" -FCFLAGS_COVERAGE="--coverage" -``` - -Note that the default values for these flags are very minimal, in order to -ensure compatibility over the largest possible range of compilers. - -Like all configuration variables, these can be specified in a `config.mk` file. - - -## Building the executables - -Run `make` to build the test executables. -``` -make -``` -This will fetch the MKMF build toolchain, fetch and compile the FMS framework -library, and compile the executables used in the test suite. The default -configuration uses the symmetric grid in the debug-compile mode, with -optimizations disabled and stronger quality controls. The following -executables will be created. - -- `build/symmetric/MOM6`: Symmetric grid configuration (i.e. extended grids - along western and/or southern boundaries for selected fields). This is the - default configuration. - -- `build/asymmetric/MOM6`: Non-symmetric grid (equal-sized grids) - -- `build/repro/MOM6`: Optimized reproducible mode - -- `build/target/MOM6`: A reference build for regression testing - -- `build/openmp/MOM6`: OpenMP-enabled build - -The `target` and `repro` builds are only created when their respective tests -are set to `true`. - - -### Regression testing - -When regression tests are enabled, the Makefile will check out a second copy of -the repository from a specified URL and branch given by `MOM_TARGET_URL` and -`MOM_TARGET_BRANCH`, respectively. The code is checked out into the -`TARGET_CODEBASE` directory. - -The default settings, with resolved values as comments, are shown below. -``` -MOM_TARGET_SLUG = NOAA-GFDL/MOM6 -MOM_TARGET_URL = https://github.com/$(MOM_TARGET_SLUG) - #= https://github.com/NOAA-GFDL/MOM6 -MOM_TARGET_LOCAL_BRANCH = dev/gfdl -MOM_TARGET_BRANCH = origin/$(MOM_TARGET_LOCAL_BRANCH) - #= origin/dev/gfdl -TARGET_CODEBASE = $(BUILD)/target_codebase -``` -These default values can be configured to target a particular development -branch. - -Currently the target can only be specifed by branch name, rather than hash. - -New diagnostics do not report as a fail, and are not tracked by any CIs, but -the test will report a warning to the user. - - -## Tests - -Using `test` will run through the full test suite. -``` -make test -``` -The tests are gathered into the following groups. - -- `test.regressions`: Regression tests relative to a code state (when enabled) -- `test.grids`: Symmetric vs nonsymmetric grids -- `test.layouts`: Domain decomposition, based on parallelization -- `test.restarts`: Resubmission by restarts -- `test.repros`: Optimized (REPRO) and unoptimized (DEBUG) compilation -- `test.nans`: NaN initialization of allocated arrays -- `test.dims`: Dimensional scaling (length, time, thickness, depth) - -Each group of tests can also be run individually, such as in the following -example. -``` -make test.grids -``` - -Each configuration is tested relative to the `symmetric` build, and reports a -fail if the answers differ from this build. - - -## Test configurations - -The following model test configurations (TCs) are supported, and are based on -configurations in the MOM6-examples repository. 
- -- `tc0`: Unit testing of various model components, based on `unit_tests` -- `tc1`: A low-resolution version of the `benchmark` configuration - - `tc1.a`: Use the un-split mode with Runge-Kutta 3 time integration - - `tc1.b`: Use the un-split mode with Runge-Kutta 2 time integration -- `tc2`: An ALE configuration based on tc1 with tides - - `tc2.a`: Use sigma, PPM_H4 and no tides -- `tc3`: An open-boundary condition (OBC) test based on `circle_obcs` -- `tc4`: Sponges and initialization using I/O - - -## Code coverage - -Code coverage reports the lines of code which have been tested, and can be used -to determine if a particular section is untested. - -Coverage is measured using `gcov` and is reported for TCs using the `symmetric` -executable. - -Coverage reporting is optionally uploaded to the `codecov.io` site. -``` -https://codecov.io/gh/NOAA-GFDL/MOM6 -``` -This is disabled on default, but can be enabled by the `REPORT_COVERAGE` flag. -``` -make test REPORT_COVERAGE=true -``` -Note that any uploads will require a valid CodeCov token. - - -## Running on Travis - -Whenever code is pushed to GitHub or a pull request (PR) is created, the test -suite is triggered and the code changes are tested. - -When the tests are run on Travis, the following variables are re-defined: - -- `DO_REPRO_TESTS` is set to `true` for all tests. - -- `DO_REGRESSION_TESTS` is set to `true` for a PR submission, and is unset for - code pushes. - -- `MOM_TARGET_SLUG` is set to `TRAVIS_REPO_SLUG`, the URL stub of the model to - be built. - - For submissions to NOAA-GFDL, this will be set to `NOAA-GFDL/MOM6` and the - reference URL will be `https://github.com/NOAA-GFDL/MOM6`. - -- `MOM_TARGET_LOCAL_BRANCH` is set to `TRAVIS_BRANCH`. - - For a code push, this is set to the name of the active branch at GitHub. For - a PR, this is the name of the branch which is receiving the PR. - -- `REPORT_COVERAGE` is set to `true`. diff --git a/.testing/README.rst b/.testing/README.rst new file mode 100644 index 0000000000..5bab076707 --- /dev/null +++ b/.testing/README.rst @@ -0,0 +1,371 @@ +=============== +MOM6 Test Suite +=============== + +This directory contains test configurations used to evaluate submissions to the +MOM6 codebase. The tests are designed to run either locally or in a CI +environment. + + +Usage +===== + +``make -j`` + Build the FMS library and test executables. + +``make -j test`` + Run the test suite, defined in the ``tc`` directores. + +``make clean.build`` + Delete only the MOM6 test executables. + +``make clean`` + Delete the MOM6 test executables and dependency builds (FMS). + + +Configuration +============= + +The test suite includes many configuration flags and variables which can be +configured at either the command line, or can be stored in a ``config.mk`` +file. + +Several of the following may require configuration for particular systems. + +``MPIRUN`` (*default:* ``mpirun``) + Name of the MPI launcher. Often this is ``mpirun`` or ``mpiexec`` but may + all need to run through a scheduler, e.g. ``srun`` if using Slurm. + +``FRAMEWORK`` (*default:* ``fms1``) + Select either the legacy FMS framework (``fms1``) or an FMS2 I/O compatible + version (``fms2``). + +``DO_REPRO_TESTS`` (*default:* *none*) + Set to ``true`` to test the REPRO build and confirm equivalence of DEBUG and + REPRO builds. + + For compilers with aggressive optimization, DEBUG and REPRO may not produce + identical results and this test should not be used. 
+ +``DO_REGRESSION_TESTS`` (*default:* *none*) + Set to ``true`` to compare output with a defined target branch, set by + ``MOM_TARGET_LOCAL_BRANCH``. (NOTE: This defaults to ``dev/gfdl``). + +``DO_COVERAGE`` (*default:* *none*) + Set to ``true`` to enable code coverage. Currently only configured for + ``gcov``. + +``REQUIRE_COVERAGE_UPLOAD`` (*default:* *none*) + Set to ``true`` if failure to upload the coverage report to codecov.io + should result in an error. This should only be enabled if codecov.io has + already been configured for the user, or by a supporting CI. + +``DO_PROFILE`` (*default:* *none*) + Set to ``true`` to enable performance profile monitoring. Models are + compiled using ``OPT_FCFLAGS`` (see below) and performance of various + functions are reported and compared to the target branch. + + Results from these tests should only be considered if the platform has been + configure for benchmarking. + + +Build configuration +------------------- + +Compilation is controlled with the following variables. Defaults are chosen +for the widest compatibility across platforms. Users should modify these to +reflect their own needs. + +``FCFLAGS_DEBUG`` (*default:* ``-g -O0``) + The "DEBUG" build, for rapid compilation and debugging. + +``FCFLAGS_REPRO`` (*default:* ``-g -O2``) + The "REPRO" build, for reproducible production runs. + +``FCFLAGS_OPT`` (*default:* ``-g -O3``) + The "OPT" build, for aggressive optimization and profiling. + +``FCFLAGS_COVERAGE`` (*default:* ``-g -O0 -fbacktrace --coverage``) + Flags used for producing code coverage reports. Defaults are for gcc, + although ``--coverage`` is relatively common across compilers. + +``FCFLAGS_INIT`` (*default:* *none*) + A placeholder flag for aggressive initialization testing. This is appended + to existing flags (usually ``FCFLAGS_DEBUG``). + +``FCFLAGS_FMS`` (*default:* ``FCFLAGS_DEBUG``) + Compiler flags used for the supporting FMS library. In most cases, it is + sufficient to use ``FCFLAGS_DEBUG``. + +``LDFLAGS_COVERAGE`` (*default:* ``--coverage``) + Linker flags to enable coverage. + +``LDFLAGS_USER`` (*default:* *none*) + A placeholder for supplemental linker flags, such as an external library not + configured by autoconf. + +The following flags are passed as environment variables to other Makefiles. + +``FC``, ``MPIFC`` + The Fortran compiler and its MPI wrapper. + +``CC``, ``MPICC`` + The C compiler and its MPI wrapper. This is primarily used by FMS, but may + be used in some MOM6 autoconf tests. + +If unset, these will be configured by autoconf or from the user's environment +variables. + +Additional settings for particular tasks are explained below. + + +Example ``config.mk`` +--------------------- + +An example config.mk file configured for GFortran is shown below.:: + + DO_REPRO_TESTS = true + DO_REGRESSION_TESTS = true + DO_COVERAGE = true + DO_PROFILE = true + + FCFLAGS_DEBUG = -g -O0 -Wextra -Wno-compare-reals -fbacktrace -ffpe-trap=invalid,zero,overflow -fcheck=bounds + FCFLAGS_REPRO = -g -O2 -fbacktrace + FCFLAGS_OPT = -g -O3 -mavx -mfma + FCFLAGS_INIT = -finit-real=snan -finit-integer=2147483647 -finit-derived + FCFLAGS_COVERAGE = --coverage + +The file follows Makefile syntax, so quotations are generally not required and +spaces are permitted between assignment operators (``=``). + + +Builds +====== + +Run ``make`` to build the test executables.:: + + $ make + +This will fetch external dependencies, compile the FMS framework library, and +compile the executables used in the test suite. 
+
+The following executables will be created.
+
+``build/symmetric/MOM6``
+  Use symmetric grids for model fields, using DEBUG flags.
+
+  A symmetric grid is one where each finite-volume cell has grid points along
+  all faces. Often this results in a redundant row of points along each side
+  of a regular domain.
+
+  This is the recommended production configuration, and is the reference build
+  for all tests in the suite.
+
+``build/asymmetric/MOM6``
+  Use asymmetric grids for model fields.
+
+  Asymmetric grids eliminate redundant fields along western and southern
+  boundaries, which reduces the total number of points. They also ensure
+  that center, face, and vertex field arrays are the same size.
+
+  The disadvantages are greater computational complexity along these
+  boundaries and a lack of support for open boundary conditions.
+
+  Asymmetric grids were traditionally used in many legacy ocean models.
+
+``build/repro/MOM6``
+  Optimized build for doing reproducible runs, based on the REPRO flags.
+
+  This is only built if ``DO_REPRO_TESTS`` is set to ``true``.
+
+``build/target/MOM6``
+  A reference build for regression testing.
+
+  The reference branch is set by ``MOM_TARGET_LOCAL_BRANCH``. This would
+  generally be configured by a CI to a pull request's target branch. This is
+  only built if ``DO_REGRESSION_TESTS`` is set to ``true``.
+
+``build/openmp/MOM6``
+  A DEBUG build with OpenMP enabled.
+
+
+Tests
+=====
+
+The ``test`` rule will run all of the tests.::
+
+  $ make test
+
+Tests are based on configurations which are designed to give identical output.
+When the output differs, the test reports a failure.
+
+
+Test groups
+-----------
+
+The tests are gathered into the following groups.
+
+``test.grid``
+  Compare symmetric and nonsymmetric grids.
+
+``test.regression``
+  Compare the current codebase to a target branch (e.g. ``dev/gfdl``).
+
+``test.layout``
+  Compare a serial (one domain) and a parallel (two domain) simulation.
+
+``test.restart``
+  Compare a single run to two runs separated by a restart.
+
+``test.repro``
+  Compare the unoptimized (DEBUG) and optimized (REPRO) builds.
+
+``test.nan``
+  Enable NaN-initialization of allocated (heap) arrays.
+
+  This relies on internal features of glibc and may not work on other
+  platforms.
+
+``test.dim``
+  Enable dimension rescaling tests.
+
+Each test uses the symmetric build for its reference state.
+
+These rules can be used to run individual groups of tests.::
+
+  $ make test.grid
+
+
+Test experiments
+----------------
+
+For each group, we test each of the following configurations, which represent
+idealizations of various production experiments.
+
+``tc0``
+  Unit testing of various model components, based on ``unit_tests``
+
+``tc1``
+  A low-resolution version of the ``benchmark`` configuration
+
+  ``tc1.a``
+    Use the un-split mode with Runge-Kutta 3 time integration
+
+  ``tc1.b``
+    Use the un-split mode with Runge-Kutta 2 time integration
+
+``tc2``
+  An ALE configuration based on tc1 with tides
+
+  ``tc2.a``
+    Use sigma, PPM_H4 and no tides
+
+``tc3``
+  An open-boundary condition (OBC) test based on ``circle_obcs``
+
+``tc4``
+  Sponges and initialization using I/O
+
+
+Test procedure
+--------------
+
+The test suite checks for numerical consistency of the model output across
+different model configurations when subjected to relevant numerical and
+mathematical transformations, such as grid layout or dimensional rescaling. If
+the model state is unchanged after each transformation, then the test is
+reported as passing.
Any discrepancy in the model state causes the test to +fail. + +Model state is currently defined by the ``ocean.stats`` output file, which +reports the total energy (per unit mass) at machine precision alongside similar +global metrics at lower precision, such as mass or mean sea level. + +Diagnostics are based on the MOM checksum function, which includes the mean, +minimum, and maximum values, alongside a bitcount checksum, in the physical +domain, which are saved in the ``chksum_diag`` output file. + + +Regression testing +================== + +When ``DO_REGRESSION_TESTS`` is enabled, the Makefile will check out a second +copy of the repository from a specified URL and branch given by +``MOM_TARGET_URL`` and ``MOM_TARGET_BRANCH``, respectively. The code is +checked out into the ``TARGET_CODEBASE`` directory. + +The default settings, with resolved values as comments, are shown below.:: + + MOM_TARGET_SLUG = NOAA-GFDL/MOM6 + MOM_TARGET_URL = https://github.com/$(MOM_TARGET_SLUG) + #= https://github.com/NOAA-GFDL/MOM6 + MOM_TARGET_LOCAL_BRANCH = dev/gfdl + MOM_TARGET_BRANCH = origin/$(MOM_TARGET_LOCAL_BRANCH) + #= origin/dev/gfdl + TARGET_CODEBASE = $(BUILD)/target_codebase + +These default values can be configured to target a particular development +branch. + +Currently the target can only be specified by branch name, rather than hash. + +New diagnostics do not report as a fail, and are not tracked by any CIs, but +the test will report a warning to the user. + + +Code coverage +============= + +Code coverage reports the lines of code which have been tested, and can be used +to determine if a particular section is untested. + +To enable code coverage, set ``DO_COVERAGE`` to ``true``. + +Reports are stored in the build directories. There is one report per source +file, and each ends in the ``.gcov`` suffix. Two sets of coverage reports are +generated. + +``build/cov`` + Test suite code coverage + +``build/unit`` + Unit test code coverage + +To upload the tests to codecov.io, use the following rules.:: + + $ make report.cov # Test suite + $ make report.cov.unit # Unit test + +Note that any uploads will require a valid CodeCov token. If uploading through +the CI, this can be set up through your GitHub account. + +Pull request coverage reports for the CI can be checked at +https://codecov.io/gh/NOAA-GFDL/MOM6 + + +CI configuration +================ + +Whenever code is pushed to GitHub or a pull request (PR) is created, the test +suite is run. + +When the tests are run on the CI, the following variables are re-defined: + +- ``DO_REPRO_TESTS`` is set to ``true`` for all tests. + +- ``DO_REGRESSION_TESTS`` is set to ``true`` for a PR submission, and is unset for + code pushes. + +- ``DO_COVERAGE`` is set to ``true``. + + - For pull requests, ``REQUIRE_COVERAGE_UPLOAD`` is set to ``true``. + +- ``MOM_TARGET_SLUG`` is set to the URL stub of the model to be built. + + For submissions to NOAA-GFDL, this will be set to ``NOAA-GFDL/MOM6`` and the + reference URL will be ``https://github.com/NOAA-GFDL/MOM6``. + +- ``MOM_TARGET_LOCAL_BRANCH`` + + For a code push, this is set to the name of the active branch at GitHub. For + a PR, this is the name of the branch which is receiving the PR. From 3b9cbcdb675d01e3e75376e349da900d33593089 Mon Sep 17 00:00:00 2001 From: Marshall Ward Date: Wed, 27 Jul 2022 14:11:21 -0400 Subject: [PATCH 05/40] Remove asterisk from partial coverage Not sure if this is wise, but codecov is severely screwing up these entries. 
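
For context, gcov suffixes the execution count of a line that contains unexecuted basic blocks with an asterisk (the "partial coverage" entries named in the title), and these are the entries that confuse codecov. The sed expression added below rewrites such counts into plain ones. A minimal sketch of its effect, using a made-up gcov output line:

    # A made-up gcov line with a partial-coverage count ("5*"); the sed
    # expression from this patch rewrites the count, preserving column width.
    $ echo '    5*:   42:      if (x > 0.) y = sqrt(x)' | sed -r 's/^( *[0-9]*)\*:/ \1:/g'
         5:   42:      if (x > 0.) y = sqrt(x)

In the Makefile rules the same expression is applied in place (``sed -i``) to every ``*.gcov`` file before upload.
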
--- .testing/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.testing/Makefile b/.testing/Makefile index bcfbc323d2..6879b04f84 100644 --- a/.testing/Makefile +++ b/.testing/Makefile @@ -576,6 +576,7 @@ work/%/$(1)/ocean.stats work/%/$(1)/chksum_diag: build/$(2)/MOM6 $(VENV_PATH) mkdir -p results/$$* ; \ cd build/$(2) ; \ gcov -b *.gcda > gcov.$$*.$(1).out ; \ + find -name "*.gcov" -exec sed -i -r 's/^( *[0-9]*)\*:/ \1:/g' {} \; ; \ curl -s $(CODECOV_UPLOADER_URL) -o codecov ; \ fi endef @@ -722,6 +723,7 @@ work/unit/std.out: build/unit/MOM_unit_tests build/unit/MOM_file_parser_tests.F90.gcov: work/unit/std.out cd $(@D) \ && gcov -b *.gcda > gcov.unit.out + find $(@D) -name "*.gcov" -exec sed -i -r 's/^( *[0-9]*)\*:/ \1:/g' {} \; .PHONY: report.cov.unit report.cov.unit: build/unit/MOM_file_parser_tests.F90.gcov codecov From 8b609d071fbe8505399960f2e93405ca9f182506 Mon Sep 17 00:00:00 2001 From: Alistair Adcroft Date: Mon, 25 Jul 2022 15:38:46 -0400 Subject: [PATCH 06/40] Facilitate multiple execs in .testing/Makefile Updates .testing/Makefile to allow for alternative executable names and multiple executables in one build directory. Until now, the .testing/Makefile assumes all executables are called "MOM6". With the introduction of makedep, executables are called by the name indicated by the Fortran program statement. Changes: - BUILDS used to be a list of directories under build/ . Now is a list of /. - UNIT_EXECS lists possible executables within build/unit . This list allows us to override targets at the command-line. - Replaces many $(foreach b,$(BUILDS), ...) constructs in favor of build/%/... rules. i.e. reduces use of $(BUILDS) in general - Corrected name of executable in build/unit Co-authored-by: Marshall Ward --- .github/workflows/coverage.yml | 5 +---- .testing/Makefile | 40 ++++++++++++++++++---------------- 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index e285b18c72..358d48a7a7 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -10,9 +10,6 @@ jobs: run: working-directory: .testing - #env: - # REQUIRE_COVERAGE_UPLOAD: true - steps: - uses: actions/checkout@v2 with: @@ -23,7 +20,7 @@ jobs: - uses: ./.github/actions/testing-setup - name: Compile unit testing - run: make -j build/unit/MOM6 + run: make -j build/unit/MOM_unit_tests - name: Run unit tests run: make run.cov.unit diff --git a/.testing/Makefile b/.testing/Makefile index 6879b04f84..150a365692 100644 --- a/.testing/Makefile +++ b/.testing/Makefile @@ -121,7 +121,7 @@ TIME ?= time # Experiment configuration -BUILDS ?= symmetric asymmetric openmp +BUILDS ?= symmetric/MOM6 asymmetric/MOM6 openmp/MOM6 CONFIGS ?= $(wildcard tc*) TESTS ?= grid layout rotate restart openmp nan $(foreach d,$(DIMS),dim.$(d)) DIMS ?= t l h z q r @@ -134,26 +134,29 @@ DIMS ?= t l h z q r # the DEBUG results in older GCCs and vendor compilers, so we can optionally # disable them. 
ifeq ($(DO_REPRO_TESTS), true) - BUILDS += repro + BUILDS += repro/MOM6 TESTS += repro endif # Profiling ifeq ($(DO_PROFILE), true) - BUILDS += opt opt_target + BUILDS += opt/MOM6 opt_target/MOM6 endif # Unit testing +UNIT_EXECS ?= MOM_unit_tests ifeq ($(DO_COVERAGE), true) - BUILDS += cov unit + BUILDS += cov/MOM6 $(foreach e, $(UNIT_EXECS), unit/$(e)) +endif + +ifeq ($(DO_PROFILE), false) + BUILDS += opt/MOM6 opt_target/MOM6 endif -CODECOV_UPLOADER_URL ?= https://uploader.codecov.io/latest/linux/codecov DO_REGRESSION_TESTS ?= - ifeq ($(DO_REGRESSION_TESTS), true) - BUILDS += target + BUILDS += target/MOM6 TESTS += regression MOM_TARGET_SLUG ?= NOAA-GFDL/MOM6 @@ -214,13 +217,12 @@ endif # Rules .PHONY: all build.regressions build.prof -all: $(foreach b,$(BUILDS),build/$(b)/MOM6) $(VENV_PATH) +all: $(foreach b,$(BUILDS),build/$(b)) $(VENV_PATH) build.regressions: $(foreach b,symmetric target,build/$(b)/MOM6) build.prof: $(foreach b,opt opt_target,build/$(b)/MOM6) # Executable -BUILD_TARGETS = MOM6 Makefile path_names -.PRECIOUS: $(foreach b,$(BUILDS),$(foreach f,$(BUILD_TARGETS),build/$(b)/$(f))) +.PRECIOUS: $(foreach b,$(BUILDS),build/$(b)) # Compiler flags @@ -283,18 +285,18 @@ build/opt_target/Makefile: | $(TARGET_CODEBASE) # NOTE: ./configure is too much, but Makefile is not enough! # Ideally we only want to re-run both Makefile and mkmf, but the mkmf call # is inside ./configure, so we must re-run ./configure as well. -$(foreach b,$(filter-out target,$(BUILDS)),build/$(b)/Makefile): $(MOM_SOURCE) build/target_codebase/configure: $(TARGET_SOURCE) -# Build MOM6 -.PRECIOUS: $(foreach b,$(BUILDS),build/$(b)/MOM6) -build/%/MOM6: build/%/Makefile +# Build executables +$(foreach e,$(UNIT_EXECS),build/unit/$(e)): build/unit/Makefile $(MOM_SOURCE) + cd $(@D) && $(TIME) $(MAKE) -j +build/%/MOM6: build/%/Makefile $(MOM_SOURCE) cd $(@D) && $(TIME) $(MAKE) -j # Use autoconf to construct the Makefile for each target -.PRECIOUS: $(foreach b,$(BUILDS),build/$(b)/Makefile) +.PRECIOUS: build/%/Makefile build/%/Makefile: ../ac/configure ../ac/Makefile.in deps/lib/libFMS.a mkdir -p $(@D) cd $(@D) \ @@ -577,12 +579,12 @@ work/%/$(1)/ocean.stats work/%/$(1)/chksum_diag: build/$(2)/MOM6 $(VENV_PATH) cd build/$(2) ; \ gcov -b *.gcda > gcov.$$*.$(1).out ; \ find -name "*.gcov" -exec sed -i -r 's/^( *[0-9]*)\*:/ \1:/g' {} \; ; \ - curl -s $(CODECOV_UPLOADER_URL) -o codecov ; \ fi endef # Upload coverage reports +CODECOV_UPLOADER_URL ?= https://uploader.codecov.io/latest/linux/codecov codecov: curl -s $(CODECOV_UPLOADER_URL) -o $@ chmod +x codecov @@ -693,7 +695,7 @@ test.summary: #--- -# unit test +# Unit test # NOTE: Using file parser gcov report as a proxy for test completion .PHONY: run.cov.unit @@ -718,8 +720,8 @@ work/unit/std.out: build/unit/MOM_unit_tests cat p2.std.err | tail -n 100 ; \ ) -# Use driver coverage file as a proxy for the run # NOTE: .gcov actually depends on .gcda, but .gcda is produced with std.out +# TODO: Replace work/unit/std.out with *.gcda? 
build/unit/MOM_file_parser_tests.F90.gcov: work/unit/std.out cd $(@D) \ && gcov -b *.gcda > gcov.unit.out @@ -733,7 +735,7 @@ report.cov.unit: build/unit/MOM_file_parser_tests.F90.gcov codecov && echo -e "${MAGENTA}Report uploaded to codecov.${RESET}" \ || { \ echo -e "${RED}Failed to upload report.${RESET}" ; \ - if [ $(REQUIRE_COVERAGE_UPLOAD) = "true" ] ; then false ; fi ; \ + if [ "$(REQUIRE_COVERAGE_UPLOAD)" = true ] ; then false ; fi ; \ } From b61efc70efe248d72029ab7d8ec5a3e3ec81febd Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Fri, 15 Jul 2022 16:34:49 -0400 Subject: [PATCH 07/40] (*)Avoid array syntax math in MOM_wave_structure Revised the MOM_wave_structure code to avoid using array syntax in calculations and subroutine arguments. While making these changes, several incorrect or missing variable unit descriptions in comments were identified and corrected. This set of changes is mathematically equivalent, but will result in changes at the level of roundoff, either because the order of sums was changed or because divisions are replaced by multiplication by a common reciprocal. Some debugging safety checks were moved inside of a logical test of the debug flag for efficiency. One dimensionally inconsistent expression (in vertical and horizontal distances) was corrected, so that it should now pass the dimensional consistency checks. This commit will change answers in any cases that depend on this code, but because we recently corrected (in MOM6 PR#154) a major bug in this code with a much larger impact without causing any disruptions, we can be confident that this code is not yet used in an production runs where minor answer changes could be problematic. This code change does not alter any answers or output in the MOM6-examples test suite. --- src/diagnostics/MOM_wave_structure.F90 | 205 ++++++++++++++----------- 1 file changed, 118 insertions(+), 87 deletions(-) diff --git a/src/diagnostics/MOM_wave_structure.F90 b/src/diagnostics/MOM_wave_structure.F90 index d11a7af5ec..6241aef386 100644 --- a/src/diagnostics/MOM_wave_structure.F90 +++ b/src/diagnostics/MOM_wave_structure.F90 @@ -42,7 +42,8 @@ module MOM_wave_structure real, allocatable, dimension(:,:,:) :: w_strct !< Vertical structure of vertical velocity (normalized) [nondim]. real, allocatable, dimension(:,:,:) :: u_strct - !< Vertical structure of horizontal velocity (normalized) [nondim]. + !< Vertical structure of horizontal velocity (normalized and + !! divided by layer thicknesses) [Z-1 ~> m-1]. real, allocatable, dimension(:,:,:) :: W_profile !< Vertical profile of w_hat(z), where !! w(x,y,z,t) = w_hat(z)*exp(i(kx+ly-freq*t)) is the full time- @@ -141,7 +142,7 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo HxS_here, & !< A layer integrated salinity [S Z ~> ppt m] HxR_here !< A layer integrated density [R Z ~> kg m-2] real :: I_Hnew !< The inverse of a new layer thickness [Z-1 ~> m-1] - real :: drxh_sum !< The sum of density diffrences across interfaces times thicknesses [R Z ~> kg m-2] + real :: drxh_sum !< The sum of density differences across interfaces times thicknesses [R Z ~> kg m-2] real, parameter :: tol1 = 0.0001, tol2 = 0.001 real :: g_Rho0 !< G_Earth/Rho0 in [L2 Z-1 T-2 R-1 ~> m4 s-2 kg-1]. ! 
real :: rescale, I_rescale @@ -152,40 +153,47 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo real :: I_a_int !< inverse of a_int [nondim] real :: f2 !< squared Coriolis frequency [T-2 ~> s-2] real :: Kmag2 !< magnitude of horizontal wave number squared [L-2 ~> m-2] + real :: emag2 ! The sum of the squared magnitudes of the guesses [nondim] + real :: pi_htot ! The gravest vertical wavenumber in this column [Z-1 ~> m-1] + real :: renorm ! A renormalization factor [nondim] logical :: use_EOS !< If true, density is calculated from T & S using an !! equation of state. ! local representations of variables in CS; note, ! not all rows will be filled if layers get merged! real, dimension(SZK_(GV)+1) :: w_strct !< Vertical structure of vertical velocity (normalized) [nondim]. - real, dimension(SZK_(GV)+1) :: u_strct !< Vertical structure of horizontal velocity (normalized) [nondim]. + real, dimension(SZK_(GV)+1) :: u_strct !< Vertical structure of horizontal velocity (normalized and + !! divided by layer thicknesses) [Z-1 ~> m-1]. real, dimension(SZK_(GV)+1) :: W_profile !< Vertical profile of w_hat(z) = W0*w_strct(z) [Z T-1 ~> m s-1]. real, dimension(SZK_(GV)+1) :: Uavg_profile !< Vertical profile of the magnitude of - !! horizontal velocity [L T-1 ~> m s-1]. + !! horizontal velocity [L T-1 ~> m s-1]. real, dimension(SZK_(GV)+1) :: z_int !< Integrated depth [Z ~> m] real, dimension(SZK_(GV)+1) :: N2 !< Squared buoyancy frequency at each interface [T-2 ~> s-2]. real, dimension(SZK_(GV)+1) :: w_strct2 !< squared values [nondim] - real, dimension(SZK_(GV)+1) :: u_strct2 !< squared values [nondim] + real, dimension(SZK_(GV)+1) :: u_strct2 !< squared values [Z-2 ~> m-2] real, dimension(SZK_(GV)) :: dz !< thicknesses of merged layers (same as Hc I hope) [Z ~> m] - ! real, dimension(SZK_(GV)+1) :: dWdz_profile !< profile of dW/dz - real :: w2avg !< average of squared vertical velocity structure funtion [Z ~> m] - real :: int_dwdz2 !< Vertical integral of the square of u_strct [Z ~> m] + ! real, dimension(SZK_(GV)+1) :: dWdz_profile !< profile of dW/dz times total depth [Z T-1 ~> m s-1] + real :: w2avg !< average of squared vertical velocity structure function [Z ~> m] + real :: int_dwdz2 !< Vertical integral of the square of u_strct [Z-1 ~> m-1] real :: int_w2 !< Vertical integral of the square of w_strct [Z ~> m] real :: int_N2w2 !< Vertical integral of N2 [Z T-2 ~> m s-2] real :: KE_term !< terms in vertically averaged energy equation [R Z ~> kg m-2] real :: PE_term !< terms in vertically averaged energy equation [R Z ~> kg m-2] real :: W0 !< A vertical velocity magnitude [Z T-1 ~> m s-1] - real :: gp_unscaled !< A version of gprime rescaled to [L T-2 ~> m s-2]. + real :: U_mag !< A horizontal velocity magnitude times the depth of the + !! 
ocean [Z L T-1 ~> m2 s-1] real, dimension(SZK_(GV)-1) :: lam_z !< product of eigen value and gprime(k); one value for each - !< interface (excluding surface and bottom) - real, dimension(SZK_(GV)-1) :: a_diag, b_diag, c_diag - !< diagonals of tridiagonal matrix; one value for each - !< interface (excluding surface and bottom) + !< interface (excluding surface and bottom) [Z-1 ~> m-1] + real, dimension(SZK_(GV)-1) :: a_diag !< upper diagonal of tridiagonal matrix; one value for each + !< interface (excluding surface and bottom) [Z-1 ~> m-1] + real, dimension(SZK_(GV)-1) :: c_diag !< lower diagonal of tridiagonal matrix; one value for each + !< interface (excluding surface and bottom) [Z-1 ~> m-1] + real, dimension(SZK_(GV)-1) :: b_dom !< Matrix center diagonal offset from a_diag + c_diag; one value + !< for each interface (excluding surface and bottom) [Z-1 ~> m-1] real, dimension(SZK_(GV)-1) :: e_guess !< guess at eigen vector with unit amplitude (for TDMA) [nondim] real, dimension(SZK_(GV)-1) :: e_itt !< improved guess at eigen vector (from TDMA) [nondim] - real :: Pi - integer :: kc - integer :: i, j, k, k2, itt, is, ie, js, je, nz, nzm, row, ig, jg, ig_stop, jg_stop + real :: Pi ! 3.1415926535... [nondim] + integer :: i, j, k, k2, kc, itt, is, ie, js, je, nz, nzm, row, ig, jg, ig_stop, jg_stop is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec ; nz = GV%ke I_a_int = 1/a_int @@ -409,78 +417,85 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo call MOM_error(FATAL, "wave_structure: mismatch in total depths") endif - ! Note that many of the calcluation from here on revert to using vertical - ! distances in m, not Z. - ! Populate interior rows of tridiagonal matrix; must multiply through by ! gprime to get tridiagonal matrix to the symmetrical form: ! [-1/H(k-1)]e(k-1) + [1/H(k-1)+1/H(k)-lam_z]e(k) + [-1/H(k)]e(k+1) = 0, ! where lam_z = lam*gprime is now a function of depth. - ! Frist, populate interior rows + ! First, populate interior rows - ! init the values in matrix: since number of layers is variable, values need - ! to be reset + ! init the values in matrix: since number of layers is variable, values need to be reset lam_z(:) = 0.0 a_diag(:) = 0.0 - b_diag(:) = 0.0 + b_dom(:) = 0.0 c_diag(:) = 0.0 e_guess(:) = 0.0 e_itt(:) = 0.0 w_strct(:) = 0.0 do K=3,kc-1 row = K-1 ! indexing for TD matrix rows - gp_unscaled = gprime(K) - lam_z(row) = lam*gp_unscaled - a_diag(row) = gp_unscaled*(-Igu(K)) - b_diag(row) = gp_unscaled*(Igu(K)+Igl(K)) - lam_z(row) - c_diag(row) = gp_unscaled*(-Igl(K)) + lam_z(row) = lam*gprime(K) + a_diag(row) = gprime(K)*(-Igu(K)) + b_dom(row) = 2.0*gprime(K)*(Igu(K)+Igl(K)) - lam_z(row) + c_diag(row) = gprime(K)*(-Igl(K)) + enddo + if (CS%debug) then ; do row=2,kc-2 if (isnan(lam_z(row)))then ; print *, "Wave_structure: lam_z(row) is NAN" ; endif if (isnan(a_diag(row)))then ; print *, "Wave_structure: a(k) is NAN" ; endif - if (isnan(b_diag(row)))then ; print *, "Wave_structure: b(k) is NAN" ; endif if (isnan(c_diag(row)))then ; print *, "Wave_structure: c(k) is NAN" ; endif - enddo + enddo ; endif ! Populate top row of tridiagonal matrix K=2 ; row = K-1 ; - gp_unscaled = gprime(K) - lam_z(row) = lam*gp_unscaled + lam_z(row) = lam*gprime(K) a_diag(row) = 0.0 - b_diag(row) = gp_unscaled*(Igu(K)+Igl(K)) - lam_z(row) - c_diag(row) = gp_unscaled*(-Igl(K)) + b_dom(row) = gprime(K)*(Igu(K)+2.0*Igl(K)) - lam_z(row) + c_diag(row) = gprime(K)*(-Igl(K)) ! 
Populate bottom row of tridiagonal matrix K=kc ; row = K-1 - gp_unscaled = gprime(K) - lam_z(row) = lam*gp_unscaled - a_diag(row) = gp_unscaled*(-Igu(K)) - b_diag(row) = gp_unscaled*(Igu(K)+Igl(K)) - lam_z(row) + lam_z(row) = lam*gprime(K) + a_diag(row) = gprime(K)*(-Igu(K)) + b_dom(row) = gprime(K)*(2.0*Igu(K) + Igl(K)) - lam_z(row) c_diag(row) = 0.0 - ! Guess a vector shape to start with (excludes surface and bottom) - e_guess(1:kc-1) = sin((z_int(2:kc)/htot(i,j)) *Pi) - e_guess(1:kc-1) = e_guess(1:kc-1)/sqrt(sum(e_guess(1:kc-1)**2)) + ! Guess a normalized vector shape to start with (excludes surface and bottom) + emag2 = 0.0 + pi_htot = Pi / htot(i,j) + do K=2,kc + e_guess(K-1) = sin(pi_htot * z_int(K)) + emag2 = emag2 + e_guess(K-1)**2 + enddo + renorm = 1.0 / sqrt(emag2) + do K=2,kc ; e_guess(K-1) = renorm*e_guess(K-1) ; enddo ! Perform inverse iteration with tri-diag solver do itt=1,max_itt ! this solver becomes unstable very quickly + ! b_diag(1:kc-1) = b_dom(1:kc-1) + (a_diag(1:kc-1) + c_diag(1:kc-1)) !call tridiag_solver(a_diag(1:kc-1),b_diag(1:kc-1),c_diag(1:kc-1), & ! -lam_z(1:kc-1),e_guess(1:kc-1),"TDMA_T",e_itt) - call solve_diag_dominant_tridiag( c_diag(1:kc-1), b_diag(1:kc-1) - (a_diag(1:kc-1)+c_diag(1:kc-1)), & - a_diag(1:kc-1), e_guess(1:kc-1), & - e_itt, kc-1 ) - e_guess(1:kc-1) = e_itt(1:kc-1) / sqrt(sum(e_itt(1:kc-1)**2)) + call solve_diag_dominant_tridiag( c_diag, b_dom, a_diag, e_guess, e_itt, kc-1 ) + ! Renormalize the guesses of the structure. + emag2 = 0.0 + do K=2,kc ; emag2 = emag2 + e_itt(K-1)**2 ; enddo + renorm = 1.0 / sqrt(emag2) + do K=2,kc ; e_guess(K-1) = renorm*e_itt(K-1) ; enddo + + ! A test should be added here to evaluate convergence. enddo ! itt-loop - w_strct(2:kc) = e_guess(1:kc-1) + do K=2,kc ; w_strct(K) = e_guess(K-1) ; enddo w_strct(1) = 0.0 ! rigid lid at surface w_strct(kc+1) = 0.0 ! zero-flux at bottom ! Check to see if solver worked - ig_stop = 0 ; jg_stop = 0 - if (isnan(sum(w_strct(1:kc+1))))then - print *, "Wave_structure: w_strct has a NAN at ig=", ig, ", jg=", jg - if (iG%iec .or. jG%jec)then - print *, "This is occuring at a halo point." + if (CS%debug) then + ig_stop = 0 ; jg_stop = 0 + if (isnan(sum(w_strct(1:kc+1)))) then + print *, "Wave_structure: w_strct has a NAN at ig=", ig, ", jg=", jg + if (iG%iec .or. jG%jec)then + print *, "This is occuring at a halo point." + endif + ig_stop = ig ; jg_stop = jg endif - ig_stop = ig ; jg_stop = jg endif ! Normalize vertical structure function of w such that @@ -493,7 +508,8 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo w2avg = w2avg + 0.5*(w_strct(K)**2+w_strct(K+1)**2)*dz(k) enddo ! correct renormalization: - w_strct(:) = w_strct(:) * sqrt(htot(i,j)*a_int/w2avg) + renorm = sqrt(htot(i,j)*a_int/w2avg) + do K=1,kc+1 ; w_strct(K) = renorm * w_strct(K) ; enddo ! Calculate vertical structure function of u (i.e. dw/dz) do K=2,nzm-1 @@ -510,8 +526,10 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo ! Calculate terms in vertically integrated energy equation int_dwdz2 = 0.0 ; int_w2 = 0.0 ; int_N2w2 = 0.0 - u_strct2(1:nzm) = u_strct(1:nzm)**2 - w_strct2(1:nzm) = w_strct(1:nzm)**2 + do K=1,nzm + u_strct2(K) = u_strct(K)**2 + w_strct2(K) = w_strct(K)**2 + enddo ! vertical integration with Trapezoidal rule do k=1,nzm-1 int_dwdz2 = int_dwdz2 + 0.5*(u_strct2(K)+u_strct2(K+1)) * dz(k) @@ -522,7 +540,7 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo ! 
Back-calculate amplitude from energy equation if (present(En) .and. (freq**2*Kmag2 > 0.0)) then ! Units here are [R Z ~> kg m-2] - KE_term = 0.25*GV%Rho0*( ((freq**2 + f2) / (freq**2*Kmag2))*int_dwdz2 + int_w2 ) + KE_term = 0.25*GV%Rho0*( ((freq**2 + f2) / (freq**2*Kmag2))*US%L_to_Z**2*int_dwdz2 + int_w2 ) PE_term = 0.25*GV%Rho0*( int_N2w2 / freq**2 ) if (En(i,j) >= 0.0) then W0 = sqrt( En(i,j) / (KE_term + PE_term) ) @@ -532,34 +550,43 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo W0 = 0.0 endif ! Calculate actual vertical velocity profile and derivative - W_profile(:) = W0*w_strct(:) - ! dWdz_profile(:) = W0*u_strct(:) - ! Calculate average magnitude of actual horizontal velocity over a period - Uavg_profile(:) = abs(W0*u_strct(:)) * sqrt((freq**2 + f2) / (2.0*freq**2*Kmag2)) + U_mag = W0 * sqrt((freq**2 + f2) / (2.0*freq**2*Kmag2)) + do K=1,nzm + W_profile(K) = W0*w_strct(K) + ! dWdz_profile(K) = W0*u_strct(K) + ! Calculate average magnitude of actual horizontal velocity over a period + Uavg_profile(K) = abs(U_mag * u_strct(K)) + enddo else - W_profile(:) = 0.0 - ! dWdz_profile(:) = 0.0 - Uavg_profile(:) = 0.0 + do K=1,nzm + W_profile(K) = 0.0 + ! dWdz_profile(K) = 0.0 + Uavg_profile(K) = 0.0 + enddo endif ! Store values in control structure - CS%w_strct(i,j,1:nzm) = w_strct(1:nzm) - CS%u_strct(i,j,1:nzm) = u_strct(1:nzm) - CS%W_profile(i,j,1:nzm) = W_profile(1:nzm) - CS%Uavg_profile(i,j,1:nzm)= Uavg_profile(1:nzm) - CS%z_depths(i,j,1:nzm) = z_int(1:nzm) - CS%N2(i,j,1:nzm) = N2(1:nzm) - CS%num_intfaces(i,j) = nzm + do K=1,nzm + CS%w_strct(i,j,K) = w_strct(K) + CS%u_strct(i,j,K) = u_strct(K) + CS%W_profile(i,j,K) = W_profile(K) + CS%Uavg_profile(i,j,K) = Uavg_profile(K) + CS%z_depths(i,j,K) = z_int(K) + CS%N2(i,j,K) = N2(K) + enddo + CS%num_intfaces(i,j) = nzm else ! If not enough layers, default to zero nzm = kc+1 - CS%w_strct(i,j,1:nzm) = 0.0 - CS%u_strct(i,j,1:nzm) = 0.0 - CS%W_profile(i,j,1:nzm) = 0.0 - CS%Uavg_profile(i,j,1:nzm)= 0.0 - CS%z_depths(i,j,1:nzm) = 0.0 ! could use actual values - CS%N2(i,j,1:nzm) = 0.0 ! could use with actual values - CS%num_intfaces(i,j) = nzm + do K=1,nzm + CS%w_strct(i,j,K) = 0.0 + CS%u_strct(i,j,K) = 0.0 + CS%W_profile(i,j,K) = 0.0 + CS%Uavg_profile(i,j,K) = 0.0 + CS%z_depths(i,j,K) = 0.0 ! could use actual values + CS%N2(i,j,K) = 0.0 ! could use with actual values + enddo + CS%num_intfaces(i,j) = nzm endif ! kc >= 3 and kc > ModeNum + 1? endif ! drxh_sum >= 0? !else ! if at test point - delete later @@ -568,14 +595,16 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo endif ! mask2dT > 0.0? else ! if cn=0.0, default to zero - nzm = nz+1! could use actual values - CS%w_strct(i,j,1:nzm) = 0.0 - CS%u_strct(i,j,1:nzm) = 0.0 - CS%W_profile(i,j,1:nzm) = 0.0 - CS%Uavg_profile(i,j,1:nzm)= 0.0 - CS%z_depths(i,j,1:nzm) = 0.0 ! could use actual values - CS%N2(i,j,1:nzm) = 0.0 ! could use with actual values - CS%num_intfaces(i,j) = nzm + nzm = nz+1 ! could use actual values + do K=1,nzm + CS%w_strct(i,j,K) = 0.0 + CS%u_strct(i,j,K) = 0.0 + CS%W_profile(i,j,K) = 0.0 + CS%Uavg_profile(i,j,K) = 0.0 + CS%z_depths(i,j,K) = 0.0 ! could use actual values + CS%N2(i,j,K) = 0.0 ! could use with actual values + enddo + CS%num_intfaces(i,j) = nzm endif ; enddo ! if cn>0.0? ; i-loop enddo ! j-loop @@ -586,6 +615,8 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo end subroutine wave_structure +! The subroutine tridiag_solver is never used and could perhaps be deleted. 
+ !> Solves a tri-diagonal system Ax=y using either the standard !! Thomas algorithm (TDMA_T) or its more stable variant that invokes the !! "Hallberg substitution" (TDMA_H). @@ -722,8 +753,8 @@ subroutine wave_structure_init(Time, G, GV, param_file, diag, CS) !! diagnostic output. type(wave_structure_CS), intent(inout) :: CS !< Wave structure control struct -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "MOM_wave_structure" ! This module's name. integer :: isd, ied, jsd, jed, nz From 97319c9b5d5b88257514ca9d479dc3c7176210d1 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Sun, 31 Jul 2022 09:46:09 -0400 Subject: [PATCH 08/40] Corrected a sign error in commented out code Corrected a sign error in commented out code in wave_structure, as noted in a review by Raf Dussin of the previous PR. All answers are bitwise identical. --- src/diagnostics/MOM_wave_structure.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diagnostics/MOM_wave_structure.F90 b/src/diagnostics/MOM_wave_structure.F90 index 6241aef386..0f97b560db 100644 --- a/src/diagnostics/MOM_wave_structure.F90 +++ b/src/diagnostics/MOM_wave_structure.F90 @@ -469,12 +469,12 @@ subroutine wave_structure(h, tv, G, GV, US, cn, ModeNum, freq, CS, En, full_halo ! Perform inverse iteration with tri-diag solver do itt=1,max_itt ! this solver becomes unstable very quickly - ! b_diag(1:kc-1) = b_dom(1:kc-1) + (a_diag(1:kc-1) + c_diag(1:kc-1)) + ! b_diag(1:kc-1) = b_dom(1:kc-1) - (a_diag(1:kc-1) + c_diag(1:kc-1)) !call tridiag_solver(a_diag(1:kc-1),b_diag(1:kc-1),c_diag(1:kc-1), & ! -lam_z(1:kc-1),e_guess(1:kc-1),"TDMA_T",e_itt) call solve_diag_dominant_tridiag( c_diag, b_dom, a_diag, e_guess, e_itt, kc-1 ) - ! Renormalize the guesses of the structure. + ! Renormalize the guesses of the structure.- emag2 = 0.0 do K=2,kc ; emag2 = emag2 + e_itt(K-1)**2 ; enddo renorm = 1.0 / sqrt(emag2) From 6835709f851b388e0fa02055c8f37a597c99a8aa Mon Sep 17 00:00:00 2001 From: "Alan J. Wallcraft" Date: Fri, 29 Jul 2022 15:34:53 +0000 Subject: [PATCH 09/40] Bugfix to bottom drag as a body force DRAG_AS_BODY_FORCE was using two inconsistent values for the bottom boundary layer. It now always uses HBBL, or the total water depth if that is less than HBBL. Results with DRAG_AS_BODY_FORCE=False are unchanged. --- src/parameterizations/vertical/MOM_set_viscosity.F90 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/parameterizations/vertical/MOM_set_viscosity.F90 b/src/parameterizations/vertical/MOM_set_viscosity.F90 index 22d65110be..7c6d96dede 100644 --- a/src/parameterizations/vertical/MOM_set_viscosity.F90 +++ b/src/parameterizations/vertical/MOM_set_viscosity.F90 @@ -741,6 +741,9 @@ subroutine set_viscous_BBL(u, v, h, tv, visc, G, GV, US, CS, pbv) ! bbl_thick. if ((bbl_thick > 0.5*CS%Hbbl) .and. (CS%RiNo_mix)) bbl_thick = 0.5*CS%Hbbl + ! If drag is a body force, bbl_thick is HBBL + if (CS%body_force_drag) bbl_thick = h_bbl_drag(i) + if (CS%Channel_drag) then ! The drag within the bottommost bbl_thick is applied as a part of ! an enhanced bottom viscosity, while above this the drag is applied @@ -1022,7 +1025,7 @@ subroutine set_viscous_BBL(u, v, h, tv, visc, G, GV, US, CS, pbv) visc%Ray_v(i,J,k) = visc%Ray_v(i,J,k) + (CS%cdrag*US%L_to_Z*umag_avg(i)) * h_bbl_fr endif h_sum = h_sum + h_at_vel(i,k) - if (h_sum >= bbl_thick) exit ! 
The top of this layer is above the drag zone. + if (h_sum >= h_bbl_drag(i)) exit ! The top of this layer is above the drag zone. enddo ! Do not enhance the near-bottom viscosity in this case. Kv_bbl = CS%Kv_BBL_min @@ -2003,7 +2006,8 @@ subroutine set_visc_init(Time, G, GV, US, param_file, diag, visc, CS, restart_CS call get_param(param_file, mdl, "DRAG_AS_BODY_FORCE", CS%body_force_drag, & "If true, the bottom stress is imposed as an explicit body force "//& "applied over a fixed distance from the bottom, rather than as an "//& - "implicit calculation based on an enhanced near-bottom viscosity", & + "implicit calculation based on an enhanced near-bottom viscosity. "//& + "The thickness of the bottom boundary layer is HBBL.", & default=.false., do_not_log=.not.CS%bottomdraglaw) call get_param(param_file, mdl, "CHANNEL_DRAG", CS%Channel_drag, & "If true, the bottom drag is exerted directly on each "//& From 34fa57064a4cde8331b27c44c64b538a744907c0 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Thu, 21 Jul 2022 12:35:46 -0400 Subject: [PATCH 10/40] Use MOM6 framework modules in mct & nuopc drivers Modified the coupler_types, data_override and time_interp modules used in the mct_cap and nuopc_cap driver code to use the appropriate modules from the MOM6 framework directory, which are properly documented and extensible, and will accommodate and buffer changes in the underlying FMS or other infrastructure code. These changes should have been in place previously, but are required to allow for rescaling of the fields being read via time_interp_external or data_override. Some unused module use references to mpp_chksum from mpp_mod were also removed. These changes mirror changes that had previously been applied to the FMS_cap driver code. All answers should be bitwise identical, but this has not been specifically tested via the GFDL testing procedures. 
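
A quick way to see whether any direct references to the underlying FMS modules remain in the two driver directories is a search along the following lines (a hypothetical spot check, not part of this patch):

    # Hypothetical spot check: list any driver files that still import the
    # FMS interfaces directly rather than through the MOM6 framework wrappers.
    $ grep -rlE 'coupler_types_mod|data_override_mod|time_interp_external_mod' \
        config_src/drivers/mct_cap config_src/drivers/nuopc_cap

Any files listed would still bypass the MOM_coupler_types, MOM_data_override and MOM_interpolate wrappers introduced here.
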
--- .../drivers/mct_cap/mom_ocean_model_mct.F90 | 11 +++++------ .../drivers/mct_cap/mom_surface_forcing_mct.F90 | 16 +++++++--------- config_src/drivers/mct_cap/ocn_comp_mct.F90 | 7 ++----- .../drivers/nuopc_cap/mom_ocean_model_nuopc.F90 | 11 +++++------ .../nuopc_cap/mom_surface_forcing_nuopc.F90 | 16 +++++++--------- 5 files changed, 26 insertions(+), 35 deletions(-) diff --git a/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 b/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 index 5b1a980de1..f617dfa0f3 100644 --- a/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 +++ b/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 @@ -51,18 +51,17 @@ module MOM_ocean_model_mct use MOM_verticalGrid, only : verticalGrid_type use MOM_ice_shelf, only : initialize_ice_shelf, shelf_calc_flux, ice_shelf_CS use MOM_ice_shelf, only : add_shelf_forces, ice_shelf_end, ice_shelf_save_restart -use coupler_types_mod, only : coupler_1d_bc_type, coupler_2d_bc_type -use coupler_types_mod, only : coupler_type_spawn, coupler_type_write_chksums -use coupler_types_mod, only : coupler_type_initialized, coupler_type_copy_data -use coupler_types_mod, only : coupler_type_set_diags, coupler_type_send_data +use MOM_coupler_types, only : coupler_1d_bc_type, coupler_2d_bc_type +use MOM_coupler_types, only : coupler_type_spawn, coupler_type_write_chksums +use MOM_coupler_types, only : coupler_type_initialized, coupler_type_copy_data +use MOM_coupler_types, only : coupler_type_set_diags, coupler_type_send_data use mpp_domains_mod, only : domain2d, mpp_get_layout, mpp_get_global_domain use mpp_domains_mod, only : mpp_define_domains, mpp_get_compute_domain, mpp_get_data_domain use MOM_io, only : stdout -use mpp_mod, only : mpp_chksum use MOM_EOS, only : gsw_sp_from_sr, gsw_pt_from_ct use MOM_wave_interface, only : wave_parameters_CS, MOM_wave_interface_init use MOM_wave_interface, only : Update_Surface_Waves -use time_interp_external_mod, only : time_interp_external_init +use MOM_interpolate, only : time_interp_external_init ! 
MCT specfic routines use MOM_domains, only : MOM_infra_end diff --git a/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 b/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 index 4adccfef65..c2d84526fc 100644 --- a/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 +++ b/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 @@ -4,8 +4,12 @@ module MOM_surface_forcing_mct use MOM_coms, only : reproducing_sum, field_chksum use MOM_constants, only : hlv, hlf +use MOM_coupler_types, only : coupler_2d_bc_type, coupler_type_write_chksums +use MOM_coupler_types, only : coupler_type_initialized, coupler_type_spawn +use MOM_coupler_types, only : coupler_type_copy_data use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end use MOM_cpu_clock, only : CLOCK_SUBCOMPONENT +use MOM_data_override, only : data_override_init, data_override use MOM_diag_mediator, only : diag_ctrl use MOM_diag_mediator, only : safe_alloc_ptr, time_type use MOM_domains, only : pass_vector, pass_var, fill_symmetric_edges @@ -19,7 +23,10 @@ module MOM_surface_forcing_mct use MOM_forcing_type, only : allocate_mech_forcing, deallocate_mech_forcing use MOM_get_input, only : Get_MOM_Input, directories use MOM_grid, only : ocean_grid_type +use MOM_interpolate, only : init_external_field, time_interp_external +use MOM_interpolate, only : time_interp_external_init use MOM_io, only : slasher, write_version_number, MOM_read_data +use MOM_io, only : stdout use MOM_restart, only : register_restart_field, restart_init, MOM_restart_CS use MOM_restart, only : restart_init_end, save_restart, restore_state use MOM_string_functions, only : uppercase @@ -28,15 +35,6 @@ module MOM_surface_forcing_mct use MOM_variables, only : surface use user_revise_forcing, only : user_alter_forcing, user_revise_forcing_init use user_revise_forcing, only : user_revise_forcing_CS - -use coupler_types_mod, only : coupler_2d_bc_type, coupler_type_write_chksums -use coupler_types_mod, only : coupler_type_initialized, coupler_type_spawn -use coupler_types_mod, only : coupler_type_copy_data -use data_override_mod, only : data_override_init, data_override -use mpp_mod, only : mpp_chksum -use time_interp_external_mod, only : init_external_field, time_interp_external -use time_interp_external_mod, only : time_interp_external_init -use MOM_io, only : stdout use iso_fortran_env, only : int64 implicit none ; private diff --git a/config_src/drivers/mct_cap/ocn_comp_mct.F90 b/config_src/drivers/mct_cap/ocn_comp_mct.F90 index f4b2ceed77..85b7350b77 100644 --- a/config_src/drivers/mct_cap/ocn_comp_mct.F90 +++ b/config_src/drivers/mct_cap/ocn_comp_mct.F90 @@ -51,15 +51,12 @@ module ocn_comp_mct use MOM_surface_forcing_mct, only: surface_forcing_CS, forcing_save_restart, ice_ocean_boundary_type use ocn_cap_methods, only: ocn_import, ocn_export -! FMS modules -use time_interp_external_mod, only : time_interp_external - ! MCT indices structure and import and export routines that access mom data use ocn_cpl_indices, only : cpl_indices_type, cpl_indices_init ! GFDL coupler modules -use coupler_types_mod, only : coupler_type_spawn -use coupler_types_mod, only : coupler_type_initialized, coupler_type_copy_data +use MOM_coupler_types, only : coupler_type_spawn +use MOM_coupler_types, only : coupler_type_initialized, coupler_type_copy_data ! 
By default make data private implicit none; private diff --git a/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 b/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 index dddac936d4..a51767946c 100644 --- a/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 +++ b/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 @@ -40,21 +40,20 @@ module MOM_ocean_model_nuopc use MOM_time_manager, only : operator(+), operator(-), operator(*), operator(/) use MOM_time_manager, only : operator(/=), operator(<=), operator(>=) use MOM_time_manager, only : operator(<), real_to_time_type, time_type_to_real -use time_interp_external_mod,only : time_interp_external_init +use MOM_interpolate, only : time_interp_external_init use MOM_tracer_flow_control, only : call_tracer_flux_init use MOM_unit_scaling, only : unit_scale_type use MOM_variables, only : surface use MOM_verticalGrid, only : verticalGrid_type use MOM_ice_shelf, only : initialize_ice_shelf, shelf_calc_flux, ice_shelf_CS use MOM_ice_shelf, only : add_shelf_forces, ice_shelf_end, ice_shelf_save_restart -use coupler_types_mod, only : coupler_1d_bc_type, coupler_2d_bc_type -use coupler_types_mod, only : coupler_type_spawn, coupler_type_write_chksums -use coupler_types_mod, only : coupler_type_initialized, coupler_type_copy_data -use coupler_types_mod, only : coupler_type_set_diags, coupler_type_send_data +use MOM_coupler_types, only : coupler_1d_bc_type, coupler_2d_bc_type +use MOM_coupler_types, only : coupler_type_spawn, coupler_type_write_chksums +use MOM_coupler_types, only : coupler_type_initialized, coupler_type_copy_data +use MOM_coupler_types, only : coupler_type_set_diags, coupler_type_send_data use mpp_domains_mod, only : domain2d, mpp_get_layout, mpp_get_global_domain use mpp_domains_mod, only : mpp_define_domains, mpp_get_compute_domain, mpp_get_data_domain use fms_mod, only : stdout -use mpp_mod, only : mpp_chksum use MOM_EOS, only : gsw_sp_from_sr, gsw_pt_from_ct use MOM_wave_interface, only : wave_parameters_CS, MOM_wave_interface_init use MOM_wave_interface, only : Update_Surface_Waves, query_wave_properties diff --git a/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 index c45a59c221..7637f7bafd 100644 --- a/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 +++ b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 @@ -5,8 +5,12 @@ module MOM_surface_forcing_nuopc use MOM_coms, only : reproducing_sum, field_chksum use MOM_constants, only : hlv, hlf +use MOM_coupler_types, only : coupler_2d_bc_type, coupler_type_write_chksums +use MOM_coupler_types, only : coupler_type_initialized, coupler_type_spawn +use MOM_coupler_types, only : coupler_type_copy_data use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end use MOM_cpu_clock, only : CLOCK_SUBCOMPONENT +use MOM_data_override, only : data_override_init, data_override use MOM_diag_mediator, only : diag_ctrl use MOM_diag_mediator, only : safe_alloc_ptr, time_type use MOM_domains, only : pass_vector, pass_var, fill_symmetric_edges @@ -20,6 +24,8 @@ module MOM_surface_forcing_nuopc use MOM_forcing_type, only : allocate_mech_forcing, deallocate_mech_forcing use MOM_get_input, only : Get_MOM_Input, directories use MOM_grid, only : ocean_grid_type +use MOM_interpolate, only : init_external_field, time_interp_external +use MOM_interpolate, only : time_interp_external_init use MOM_CFC_cap, only : CFC_cap_fluxes use MOM_io, only : slasher, write_version_number, MOM_read_data use 
MOM_io, only : stdout @@ -31,15 +37,7 @@ module MOM_surface_forcing_nuopc use MOM_variables, only : surface use user_revise_forcing, only : user_alter_forcing, user_revise_forcing_init use user_revise_forcing, only : user_revise_forcing_CS - -use coupler_types_mod, only : coupler_2d_bc_type, coupler_type_write_chksums -use coupler_types_mod, only : coupler_type_initialized, coupler_type_spawn -use coupler_types_mod, only : coupler_type_copy_data -use data_override_mod, only : data_override_init, data_override -use mpp_mod, only : mpp_chksum -use time_interp_external_mod, only : init_external_field, time_interp_external -use time_interp_external_mod, only : time_interp_external_init -use iso_fortran_env, only : int64 +use iso_fortran_env, only : int64 implicit none ; private From 17309d1a54ceac4e77a6648782d2096deac6b564 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Tue, 19 Jul 2022 11:48:11 -0400 Subject: [PATCH 11/40] +Rescale 2 elements of the surface type Applied dimensional rescaling of the ocean_heat and ocean_salt elements of the surface type. Although this surface_state is a public type, neither of these particular elements are used outside of MOM6 and these fields are not reused after they are set. They are instead being retained because they may become useful in the future. All answers are bitwise identical. --- src/core/MOM.F90 | 8 ++++---- src/core/MOM_variables.F90 | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index 29bef6bcd8..a1a3d8c382 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -3544,8 +3544,8 @@ subroutine extract_surface_state(CS, sfc_state_in) do j=js,je ; do k=1,nz ; do i=is,ie mass = GV%H_to_RZ*h(i,j,k) sfc_state%ocean_mass(i,j) = sfc_state%ocean_mass(i,j) + mass - sfc_state%ocean_heat(i,j) = sfc_state%ocean_heat(i,j) + mass * US%C_to_degC*CS%tv%T(i,j,k) - sfc_state%ocean_salt(i,j) = sfc_state%ocean_salt(i,j) + mass * (1.0e-3*US%S_to_ppt*CS%tv%S(i,j,k)) + sfc_state%ocean_heat(i,j) = sfc_state%ocean_heat(i,j) + mass * CS%tv%T(i,j,k) + sfc_state%ocean_salt(i,j) = sfc_state%ocean_salt(i,j) + mass * (1.0e-3*CS%tv%S(i,j,k)) enddo ; enddo ; enddo else if (allocated(sfc_state%ocean_mass)) then @@ -3562,7 +3562,7 @@ subroutine extract_surface_state(CS, sfc_state_in) !$OMP parallel do default(shared) private(mass) do j=js,je ; do k=1,nz ; do i=is,ie mass = GV%H_to_RZ*h(i,j,k) - sfc_state%ocean_heat(i,j) = sfc_state%ocean_heat(i,j) + mass*US%C_to_degC*CS%tv%T(i,j,k) + sfc_state%ocean_heat(i,j) = sfc_state%ocean_heat(i,j) + mass*CS%tv%T(i,j,k) enddo ; enddo ; enddo endif if (allocated(sfc_state%ocean_salt)) then @@ -3571,7 +3571,7 @@ subroutine extract_surface_state(CS, sfc_state_in) !$OMP parallel do default(shared) private(mass) do j=js,je ; do k=1,nz ; do i=is,ie mass = GV%H_to_RZ*h(i,j,k) - sfc_state%ocean_salt(i,j) = sfc_state%ocean_salt(i,j) + mass * (1.0e-3*US%S_to_ppt*CS%tv%S(i,j,k)) + sfc_state%ocean_salt(i,j) = sfc_state%ocean_salt(i,j) + mass * (1.0e-3*CS%tv%S(i,j,k)) enddo ; enddo ; enddo endif endif diff --git a/src/core/MOM_variables.F90 b/src/core/MOM_variables.F90 index c8fcfc52eb..f877f781d5 100644 --- a/src/core/MOM_variables.F90 +++ b/src/core/MOM_variables.F90 @@ -56,8 +56,8 @@ module MOM_variables melt_potential, & !< Instantaneous amount of heat that can be used to melt sea ice [Q R Z ~> J m-2]. !! This is computed w.r.t. surface freezing temperature. ocean_mass, & !< The total mass of the ocean [R Z ~> kg m-2]. 
- ocean_heat, & !< The total heat content of the ocean in [degC R Z ~> degC kg m-2]. - ocean_salt, & !< The total salt content of the ocean in [kgSalt kg-1 R Z ~> kgSalt m-2]. + ocean_heat, & !< The total heat content of the ocean in [C R Z ~> degC kg m-2]. + ocean_salt, & !< The total salt content of the ocean in [1e-3 S R Z ~> kgSalt m-2]. taux_shelf, & !< The zonal stresses on the ocean under shelves [R L Z T-2 ~> Pa]. tauy_shelf !< The meridional stresses on the ocean under shelves [R L Z T-2 ~> Pa]. logical :: T_is_conT = .false. !< If true, the temperature variable SST is actually the From 4ae881c416b2c8c6afe6a8f1eaaffdcc3c4d6017 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Tue, 19 Jul 2022 16:11:17 -0400 Subject: [PATCH 12/40] +Rescale fluxes%C_p Applied temperature rescaling to the heat capacity element, C_p, of the forcing type. All answers are bitwise identical, but the rescaled units of one element of a public type were altered. --- .../FMS_cap/MOM_surface_forcing_gfdl.F90 | 4 +-- .../mct_cap/mom_surface_forcing_mct.F90 | 4 +-- .../nuopc_cap/mom_surface_forcing_nuopc.F90 | 4 +-- .../solo_driver/MESO_surface_forcing.F90 | 4 +-- .../solo_driver/MOM_surface_forcing.F90 | 10 +++---- .../solo_driver/user_surface_forcing.F90 | 4 +-- src/core/MOM.F90 | 4 +-- src/core/MOM_forcing_type.F90 | 26 +++++++++---------- src/user/BFB_surface_forcing.F90 | 2 +- src/user/SCM_CVMix_tests.F90 | 18 ++++++------- 10 files changed, 40 insertions(+), 40 deletions(-) diff --git a/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 index faa74a7fe0..b844d7c9ce 100644 --- a/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 +++ b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 @@ -248,7 +248,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, real :: kg_m2_s_conversion ! A combination of unit conversion factors for rescaling ! mass fluxes [R Z s m2 kg-1 T-1 ~> 1] real :: rhoXcp ! Reference density times heat capacity times unit scaling - ! factors [Q R degC-1 ~> J m-3 degC-1] + ! factors [Q R C-1 ~> J m-3 degC-1] real :: sign_for_net_FW_bug ! Should be +1. but an old bug can be recovered by using -1. call cpu_clock_begin(id_clock_forcing) @@ -414,7 +414,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, delta_sst = data_restore(i,j)- sfc_state%SST(i,j) delta_sst = sign(1.0,delta_sst)*min(abs(delta_sst),CS%max_delta_trestore) fluxes%heat_added(i,j) = G%mask2dT(i,j) * CS%trestore_mask(i,j) * & - rhoXcp * delta_sst * CS%Flux_const_temp ! [Q R Z T-1 ~> W m-2] + US%degC_to_C*rhoXcp * delta_sst * CS%Flux_const_temp ! 
[Q R Z T-1 ~> W m-2] enddo ; enddo endif diff --git a/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 b/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 index c2d84526fc..04cb370628 100644 --- a/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 +++ b/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 @@ -258,7 +258,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, isr = is-isd+1 ; ier = ie-isd+1 ; jsr = js-jsd+1 ; jer = je-jsd+1 kg_m2_s_conversion = US%kg_m2s_to_RZ_T - C_p = US%Q_to_J_kg*fluxes%C_p + C_p = US%Q_to_J_kg*US%degC_to_C*fluxes%C_p open_ocn_mask(:,:) = 1.0 pme_adj(:,:) = 0.0 fluxes%vPrecGlobalAdj = 0.0 @@ -414,7 +414,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, delta_sst = data_restore(i,j)- sfc_state%SST(i,j) delta_sst = sign(1.0,delta_sst)*min(abs(delta_sst),CS%max_delta_trestore) fluxes%heat_added(i,j) = G%mask2dT(i,j) * CS%trestore_mask(i,j) * & - (CS%Rho0*fluxes%C_p) * delta_sst * CS%Flux_const ! W m-2 + (CS%Rho0*US%degC_to_C*fluxes%C_p) * delta_sst * CS%Flux_const ! W m-2 enddo ; enddo endif diff --git a/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 index 7637f7bafd..0e0e384e57 100644 --- a/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 +++ b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 @@ -287,7 +287,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, isr = is-isd+1 ; ier = ie-isd+1 ; jsr = js-jsd+1 ; jer = je-jsd+1 kg_m2_s_conversion = US%kg_m2s_to_RZ_T - C_p = US%Q_to_J_kg*fluxes%C_p + C_p = US%Q_to_J_kg*US%degC_to_C*fluxes%C_p open_ocn_mask(:,:) = 1.0 pme_adj(:,:) = 0.0 fluxes%vPrecGlobalAdj = 0.0 @@ -443,7 +443,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, delta_sst = data_restore(i,j)- sfc_state%SST(i,j) delta_sst = sign(1.0,delta_sst)*min(abs(delta_sst),CS%max_delta_trestore) fluxes%heat_added(i,j) = G%mask2dT(i,j) * CS%trestore_mask(i,j) * & - (CS%Rho0*fluxes%C_p) * delta_sst * CS%Flux_const ! Q R Z T-1 ~> W m-2 + (CS%Rho0*US%degC_to_C*fluxes%C_p) * delta_sst * CS%Flux_const ! Q R Z T-1 ~> W m-2 enddo ; enddo endif diff --git a/config_src/drivers/solo_driver/MESO_surface_forcing.F90 b/config_src/drivers/solo_driver/MESO_surface_forcing.F90 index fa1d7f5701..43b4af6c55 100644 --- a/config_src/drivers/solo_driver/MESO_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/MESO_surface_forcing.F90 @@ -78,7 +78,7 @@ subroutine MESO_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) ! fluxes are in kg m-2 s-1 and positive for water moving into the ocean. real :: density_restore ! The potential density that is being restored toward [R ~> kg m-3]. - real :: rhoXcp ! The mean density times the heat capacity [Q R degC-1 ~> J m-3 degC-1]. + real :: rhoXcp ! The mean density times the heat capacity [Q R C-1 ~> J m-3 degC-1]. real :: buoy_rest_const ! A constant relating density anomalies to the ! restoring buoyancy flux [L2 T-3 R-1 ~> m5 s-3 kg-1]. @@ -172,7 +172,7 @@ subroutine MESO_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) ! salinity (in ppt or PSU) that are being restored toward. 
if (G%mask2dT(i,j) > 0.0) then fluxes%heat_added(i,j) = G%mask2dT(i,j) * & - ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * rhoXcp * CS%Flux_const) + ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * US%degC_to_C*rhoXcp * CS%Flux_const) fluxes%vprec(i,j) = - (CS%Rho0 * CS%Flux_const) * & (CS%S_Restore(i,j) - sfc_state%SSS(i,j)) / & (0.5*(sfc_state%SSS(i,j) + CS%S_Restore(i,j))) diff --git a/config_src/drivers/solo_driver/MOM_surface_forcing.F90 b/config_src/drivers/solo_driver/MOM_surface_forcing.F90 index 6de59684b7..617e7993bb 100644 --- a/config_src/drivers/solo_driver/MOM_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/MOM_surface_forcing.F90 @@ -916,7 +916,7 @@ subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) !#CTRL# SSS_mean ! A (mean?) salinity about which to normalize local salinity !#CTRL# ! anomalies when calculating restorative precipitation anomalies [ppt]. - real :: rhoXcp ! reference density times heat capacity [Q R degC-1 ~> J m-3 degC-1] + real :: rhoXcp ! reference density times heat capacity [Q R C-1 ~> J m-3 degC-1] integer :: time_lev_daily ! time levels to read for fields with daily cycle integer :: time_lev_monthly ! time levels to read for fields with monthly cycle @@ -1127,7 +1127,7 @@ subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) do j=js,je ; do i=is,ie if (G%mask2dT(i,j) > 0.0) then fluxes%heat_added(i,j) = G%mask2dT(i,j) * & - ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * rhoXcp * CS%Flux_const_T) + ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * US%degC_to_C*rhoXcp * CS%Flux_const_T) fluxes%vprec(i,j) = - (CS%Rho0*CS%Flux_const_S) * & (CS%S_Restore(i,j) - sfc_state%SSS(i,j)) / & (0.5*(sfc_state%SSS(i,j) + CS%S_Restore(i,j))) @@ -1188,7 +1188,7 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US !#CTRL# ! (observed) value [ppt]. !#CTRL# SSS_mean ! A (mean?) salinity about which to normalize local salinity !#CTRL# ! anomalies when calculating restorative precipitation anomalies [ppt]. - real :: rhoXcp ! The mean density times the heat capacity [Q R degC-1 ~> J m-3 degC-1]. + real :: rhoXcp ! The mean density times the heat capacity [Q R C-1 ~> J m-3 degC-1]. 
integer :: i, j, is, ie, js, je, isd, ied, jsd, jed call callTree_enter("buoyancy_forcing_from_data_override, MOM_surface_forcing.F90") @@ -1233,7 +1233,7 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US do j=js,je ; do i=is,ie if (G%mask2dT(i,j) > 0.0) then fluxes%heat_added(i,j) = G%mask2dT(i,j) * & - ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * rhoXcp * CS%Flux_const_T) + ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * US%degC_to_C*rhoXcp * CS%Flux_const_T) fluxes%vprec(i,j) = - (CS%Rho0*CS%Flux_const_S) * & (CS%S_Restore(i,j) - sfc_state%SSS(i,j)) / & (0.5*(sfc_state%SSS(i,j) + CS%S_Restore(i,j))) @@ -1433,7 +1433,7 @@ subroutine buoyancy_forcing_linear(sfc_state, fluxes, day, dt, G, US, CS) S_restore = CS%S_south + (CS%S_north-CS%S_south)*y if (G%mask2dT(i,j) > 0.0) then fluxes%heat_added(i,j) = G%mask2dT(i,j) * & - ((T_Restore - sfc_state%SST(i,j)) * ((CS%Rho0 * fluxes%C_p) * CS%Flux_const)) + ((T_Restore - sfc_state%SST(i,j)) * ((CS%Rho0 * US%degC_to_C*fluxes%C_p) * CS%Flux_const)) fluxes%vprec(i,j) = - (CS%Rho0*CS%Flux_const) * & (S_Restore - sfc_state%SSS(i,j)) / & (0.5*(sfc_state%SSS(i,j) + S_Restore)) diff --git a/config_src/drivers/solo_driver/user_surface_forcing.F90 b/config_src/drivers/solo_driver/user_surface_forcing.F90 index 0af6b126e1..eb1f78e3da 100644 --- a/config_src/drivers/solo_driver/user_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/user_surface_forcing.F90 @@ -129,7 +129,7 @@ subroutine USER_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) real :: Salin_restore ! The salinity that is being restored toward [ppt] real :: density_restore ! The potential density that is being restored ! toward [R ~> kg m-3]. - real :: rhoXcp ! The mean density times the heat capacity [Q R degC-1 ~> J m-3 degC-1]. + real :: rhoXcp ! The mean density times the heat capacity [Q R C-1 ~> J m-3 degC-1]. real :: buoy_rest_const ! A constant relating density anomalies to the ! restoring buoyancy flux [L2 m3 T-3 kg-1 ~> m5 s-3 kg-1]. @@ -206,7 +206,7 @@ subroutine USER_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) Temp_restore = 0.0 Salin_restore = 0.0 - fluxes%heat_added(i,j) = (G%mask2dT(i,j) * (rhoXcp * CS%Flux_const)) * & + fluxes%heat_added(i,j) = (G%mask2dT(i,j) * (US%degC_to_C*rhoXcp * CS%Flux_const)) * & (Temp_restore - sfc_state%SST(i,j)) fluxes%vprec(i,j) = - (G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const)) * & ((Salin_restore - sfc_state%SSS(i,j)) / (0.5 * (Salin_restore + sfc_state%SSS(i,j)))) diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index a1a3d8c382..f70fdae78f 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -3695,14 +3695,14 @@ subroutine get_MOM_state_elements(CS, G, GV, US, C_p, C_p_scaled, use_temp) type(unit_scale_type), optional, pointer :: US !< A dimensional unit scaling type real, optional, intent(out) :: C_p !< The heat capacity [J kg degC-1] real, optional, intent(out) :: C_p_scaled !< The heat capacity in scaled - !! units [Q degC-1 ~> J kg-1 degC-1] + !! 
units [Q C-1 ~> J kg-1 degC-1] logical, optional, intent(out) :: use_temp !< True if temperature is a state variable if (present(G)) G => CS%G_in if (present(GV)) GV => CS%GV if (present(US)) US => CS%US if (present(C_p)) C_p = CS%US%Q_to_J_kg*US%degC_to_C * CS%tv%C_p - if (present(C_p_scaled)) C_p_scaled = US%degC_to_C*CS%tv%C_p + if (present(C_p_scaled)) C_p_scaled = CS%tv%C_p if (present(use_temp)) use_temp = associated(CS%tv%T) end subroutine get_MOM_state_elements diff --git a/src/core/MOM_forcing_type.F90 b/src/core/MOM_forcing_type.F90 index 7b21093b7a..812361d3e1 100644 --- a/src/core/MOM_forcing_type.F90 +++ b/src/core/MOM_forcing_type.F90 @@ -188,8 +188,8 @@ module MOM_forcing_type !! type variable has not yet been initialized. logical :: gustless_accum_bug = .true. !< If true, use an incorrect expression in the time !! average of the gustless wind stress. - real :: C_p !< heat capacity of seawater [Q degC-1 ~> J kg-1 degC-1]. - !! C_p is is the same value as in thermovar_ptrs_type. + real :: C_p !< heat capacity of seawater [Q C-1 ~> J kg-1 degC-1]. + !! C_p is is the same value as in thermovar_ptrs_type. ! CFC-related arrays needed in the MOM_CFC_cap module real, pointer, dimension(:,:) :: & @@ -2719,17 +2719,17 @@ subroutine forcing_diagnostics(fluxes_in, sfc_state, G_in, US, time_end, diag, h if (handles%id_heat_content_surfwater > 0 .or. handles%id_total_heat_content_surfwater > 0) then do j=js,je ; do i=is,ie res(i,j) = 0.0 - if (associated(fluxes%heat_content_lrunoff)) res(i,j) = res(i,j) + fluxes%heat_content_lrunoff(i,j) - if (associated(fluxes%heat_content_frunoff)) res(i,j) = res(i,j) + fluxes%heat_content_frunoff(i,j) - if (associated(fluxes%heat_content_lprec)) res(i,j) = res(i,j) + fluxes%heat_content_lprec(i,j) - if (associated(fluxes%heat_content_fprec)) res(i,j) = res(i,j) + fluxes%heat_content_fprec(i,j) - if (associated(fluxes%heat_content_vprec)) res(i,j) = res(i,j) + fluxes%heat_content_vprec(i,j) - if (associated(fluxes%heat_content_cond)) res(i,j) = res(i,j) + fluxes%heat_content_cond(i,j) - if (mom_enthalpy) then - if (associated(fluxes%heat_content_massout)) res(i,j) = res(i,j) + fluxes%heat_content_massout(i,j) - else - if (associated(fluxes%heat_content_evap)) res(i,j) = res(i,j) + fluxes%heat_content_evap(i,j) - endif + if (associated(fluxes%heat_content_lrunoff)) res(i,j) = res(i,j) + fluxes%heat_content_lrunoff(i,j) + if (associated(fluxes%heat_content_frunoff)) res(i,j) = res(i,j) + fluxes%heat_content_frunoff(i,j) + if (associated(fluxes%heat_content_lprec)) res(i,j) = res(i,j) + fluxes%heat_content_lprec(i,j) + if (associated(fluxes%heat_content_fprec)) res(i,j) = res(i,j) + fluxes%heat_content_fprec(i,j) + if (associated(fluxes%heat_content_vprec)) res(i,j) = res(i,j) + fluxes%heat_content_vprec(i,j) + if (associated(fluxes%heat_content_cond)) res(i,j) = res(i,j) + fluxes%heat_content_cond(i,j) + if (mom_enthalpy) then + if (associated(fluxes%heat_content_massout)) res(i,j) = res(i,j) + fluxes%heat_content_massout(i,j) + else + if (associated(fluxes%heat_content_evap)) res(i,j) = res(i,j) + fluxes%heat_content_evap(i,j) + endif enddo ; enddo if (handles%id_heat_content_surfwater > 0) call post_data(handles%id_heat_content_surfwater, res, diag) if (handles%id_total_heat_content_surfwater > 0) then diff --git a/src/user/BFB_surface_forcing.F90 b/src/user/BFB_surface_forcing.F90 index 64fb31f68d..87b4d77758 100644 --- a/src/user/BFB_surface_forcing.F90 +++ b/src/user/BFB_surface_forcing.F90 @@ -125,7 +125,7 @@ subroutine 
BFB_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) call MOM_error(FATAL, "User_buoyancy_surface_forcing: " // & "Temperature and salinity restoring used without modification." ) - rhoXcp = CS%Rho0 * fluxes%C_p + rhoXcp = CS%Rho0 * US%degC_to_C*fluxes%C_p do j=js,je ; do i=is,ie ! Set Temp_restore and Salin_restore to the temperature (in degC) and ! salinity (in ppt) that are being restored toward. diff --git a/src/user/SCM_CVMix_tests.F90 b/src/user/SCM_CVMix_tests.F90 index 64a834e062..f681231694 100644 --- a/src/user/SCM_CVMix_tests.F90 +++ b/src/user/SCM_CVMix_tests.F90 @@ -38,9 +38,9 @@ module SCM_CVMix_tests logical :: UseDiurnalSW !< True to use diurnal sw radiation real :: tau_x !< (Constant) Wind stress, X [R L Z T-2 ~> Pa] real :: tau_y !< (Constant) Wind stress, Y [R L Z T-2 ~> Pa] - real :: surf_HF !< (Constant) Heat flux [degC Z T-1 ~> m degC s-1] + real :: surf_HF !< (Constant) Heat flux [C Z T-1 ~> m degC s-1] real :: surf_evap !< (Constant) Evaporation rate [Z T-1 ~> m s-1] - real :: Max_sw !< maximum of diurnal sw radiation [degC Z T-1 ~> degC m s-1] + real :: Max_sw !< maximum of diurnal sw radiation [C Z T-1 ~> degC m s-1] real :: Rho0 !< reference density [R ~> kg m-3] end type @@ -166,7 +166,7 @@ subroutine SCM_CVMix_tests_surface_forcing_init(Time, G, param_file, CS) if (CS%UseHeatFlux) then call get_param(param_file, mdl, "SCM_HEAT_FLUX", CS%surf_HF, & "Constant surface heat flux used in the SCM CVMix test surface forcing.", & - units='m K/s', scale=US%m_to_Z*US%T_to_s, fail_if_missing=.true.) + units='m K/s', scale=US%m_to_Z*US%degC_to_C*US%T_to_s, fail_if_missing=.true.) endif if (CS%UseEvaporation) then call get_param(param_file, mdl, "SCM_EVAPORATION", CS%surf_evap, & @@ -176,7 +176,7 @@ subroutine SCM_CVMix_tests_surface_forcing_init(Time, G, param_file, CS) if (CS%UseDiurnalSW) then call get_param(param_file, mdl, "SCM_DIURNAL_SW_MAX", CS%Max_sw, & "Maximum diurnal sw radiation used in the SCM CVMix test surface forcing.", & - units='m K/s', scale=US%m_to_Z*US%T_to_s, fail_if_missing=.true.) + units='m K/s', scale=US%m_to_Z*US%degC_to_C*US%T_to_s, fail_if_missing=.true.) endif call get_param(param_file, mdl, "RHO_0", CS%Rho0, & "The mean ocean density used with BOUSSINESQ true to "//& @@ -242,8 +242,8 @@ subroutine SCM_CVMix_tests_buoyancy_forcing(sfc_state, fluxes, day, G, US, CS) IsdB = G%IsdB ; IedB = G%IedB ; JsdB = G%JsdB ; JedB = G%JedB if (CS%UseHeatFlux) then - ! Note CVMix test inputs give Heat flux in [m K/s] - ! therefore must convert to W/m2 by multiplying + ! Note CVMix test inputs give Heat flux in [Z C T-1 ~> m K/s] + ! therefore must convert to [Q R Z T-1 ~> W m-2] by multiplying ! by Rho0*Cp do J=Jsq,Jeq ; do i=is,ie fluxes%sens(i,J) = CS%surf_HF * CS%Rho0 * fluxes%C_p @@ -252,7 +252,7 @@ subroutine SCM_CVMix_tests_buoyancy_forcing(sfc_state, fluxes, day, G, US, CS) if (CS%UseEvaporation) then do J=Jsq,Jeq ; do i=is,ie - ! Note CVMix test inputs give evaporation in [m s-1] + ! Note CVMix test inputs give evaporation in [Z T-1 ~> m s-1] ! This therefore must be converted to mass flux in [R Z T-1 ~> kg m-2 s-1] ! by multiplying by density and some unit conversion factors. fluxes%evap(i,J) = CS%surf_evap * CS%Rho0 @@ -261,8 +261,8 @@ subroutine SCM_CVMix_tests_buoyancy_forcing(sfc_state, fluxes, day, G, US, CS) if (CS%UseDiurnalSW) then do J=Jsq,Jeq ; do i=is,ie - ! Note CVMix test inputs give max sw rad in [m degC/s] - ! therefore must convert to W/m2 by multiplying by Rho0*Cp + ! 
Note CVMix test inputs give max sw rad in [Z C T-1 ~> m degC s-1] +      ! therefore must convert to [Q R Z T-1 ~> W m-2] by multiplying by Rho0*Cp       ! Note diurnal cycle peaks at Noon.       fluxes%sw(i,J) = CS%Max_sw * max(0.0, cos(2*PI*(time_type_to_real(DAY)/86400.0 - 0.5))) * CS%RHO0 * fluxes%C_p     enddo ; enddo From 3893b80e80745784697c7ca84440d726225e0856 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Wed, 20 Jul 2022 10:19:17 -0400 Subject: [PATCH 13/40] +Rescale sfc_state%SST and sfc_state%SSS Rescaled the SST and SSS elements of the surface type, usually sfc_state%SST and sfc_state%SSS, from units of [degC] and [ppt] to [C ~> degC] and [S ~> ppt], as well as a handful of other temperature and salinity variables related to the surface forcing (usually targets of restoring), and cancelled out a number of common US%C_to_degC or US%S_to_ppt conversion factors. Several unused variables were also removed, and a missing allocate and restart registration were added for the running-mean salinity in the (as yet unused) MOM_controlled_forcing module. All answers are bitwise identical, but there are changes to the rescaled units of two elements of a public type. --- .../FMS_cap/MOM_surface_forcing_gfdl.F90 | 51 ++++++++++--------- .../drivers/FMS_cap/ocean_model_MOM.F90 | 10 ++-- .../drivers/mct_cap/mom_ocean_model_mct.F90 | 10 ++-- .../mct_cap/mom_surface_forcing_mct.F90 | 32 +++++------- .../nuopc_cap/mom_ocean_model_nuopc.F90 | 10 ++-- .../nuopc_cap/mom_surface_forcing_nuopc.F90 | 40 +++++++-------- .../solo_driver/MESO_surface_forcing.F90 | 10 ++-- .../solo_driver/MOM_surface_forcing.F90 | 50 +++++++++--------- .../solo_driver/user_surface_forcing.F90 | 12 ++--- src/core/MOM.F90 | 32 ++++++------ src/core/MOM_checksum_packages.F90 | 6 ++- src/core/MOM_variables.F90 | 8 +-- src/diagnostics/MOM_diagnostics.F90 | 34 ++++++------- src/diagnostics/MOM_sum_output.F90 | 2 +- src/ice_shelf/MOM_ice_shelf.F90 | 43 ++++++++-------- src/tracer/MOM_CFC_cap.F90 | 6 +-- src/tracer/MOM_OCMIP2_CFC.F90 | 7 +-- src/tracer/MOM_generic_tracer.F90 | 26 ++++++---- src/tracer/MOM_tracer_flow_control.F90 | 5 +- src/user/BFB_surface_forcing.F90 | 32 ++++++------ src/user/MOM_controlled_forcing.F90 | 36 +++++++------ src/user/dumbbell_surface_forcing.F90 | 13 ++--- 22 files changed, 243 insertions(+), 232 deletions(-) diff --git a/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 index b844d7c9ce..aa5c10c958 100644 --- a/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 +++ b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 @@ -114,7 +114,8 @@ module MOM_surface_forcing_gfdl real :: Flux_const_salt !< Piston velocity for surface salt restoring [Z T-1 ~> m s-1] real :: Flux_const_temp !< Piston velocity for surface temp restoring [Z T-1 ~> m s-1] logical :: trestore_SPEAR_ECDA !< If true, modify restoring data wrt local SSS - real :: SPEAR_dTf_dS !< The derivative of the freezing temperature with salinity. + real :: SPEAR_dTf_dS !< The derivative of the freezing temperature with + !! salinity [C S-1 ~> degC ppt-1]. logical :: salt_restore_as_sflux !< If true, SSS restore as salt flux instead of water flux logical :: adjust_net_srestore_to_zero !< Adjust srestore to zero (for both salt_flux or vprec) logical :: adjust_net_srestore_by_scaling !< Adjust srestore w/o moving zero contour @@ -125,8 +126,8 @@ module MOM_surface_forcing_gfdl !! for salinity restoring. 
real :: ice_salt_concentration !< Salt concentration for sea ice [kg/kg] logical :: mask_srestore_marginal_seas !< If true, then mask SSS restoring in marginal seas - real :: max_delta_srestore !< Maximum delta salinity used for restoring - real :: max_delta_trestore !< Maximum delta sst used for restoring + real :: max_delta_srestore !< Maximum delta salinity used for restoring [S ~> ppt] + real :: max_delta_trestore !< Maximum delta sst used for restoring [C ~> degC] real, pointer, dimension(:,:) :: basin_mask => NULL() !< Mask for surface salinity restoring by basin logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover !! the answers from the end of 2018. Otherwise, use a simpler @@ -228,11 +229,11 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, !! surface state of the ocean. real, dimension(SZI_(G),SZJ_(G)) :: & - data_restore, & ! The surface value toward which to restore [ppt] or [degC] - SST_anom, & ! Instantaneous sea surface temperature anomalies from a target value [degC] - SSS_anom, & ! Instantaneous sea surface salinity anomalies from a target value [ppt] + data_restore, & ! The surface value toward which to restore [S ~> ppt] or [C ~> degC] + SST_anom, & ! Instantaneous sea surface temperature anomalies from a target value [C ~> degC] + SSS_anom, & ! Instantaneous sea surface salinity anomalies from a target value [S ~> ppt] SSS_mean, & ! A (mean?) salinity about which to normalize local salinity - ! anomalies when calculating restorative precipitation anomalies [ppt] + ! anomalies when calculating restorative precipitation anomalies [S ~> ppt] net_FW, & ! The area integrated net freshwater flux into the ocean [kg s-1] net_FW2, & ! The net freshwater flux into the ocean [kg m-2 s-1] work_sum, & ! A 2-d array that is used as the work space for global sums [m2] or [kg s-1] @@ -242,8 +243,8 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, integer :: isd, ied, jsd, jed, IsdB, IedB, JsdB, JedB, isr, ier, jsr, jer integer :: isc_bnd, iec_bnd, jsc_bnd, jec_bnd - real :: delta_sss ! temporary storage for sss diff from restoring value [ppt] - real :: delta_sst ! temporary storage for sst diff from restoring value [degC] + real :: delta_sss ! temporary storage for sss diff from restoring value [S ~> ppt] + real :: delta_sst ! temporary storage for sst diff from restoring value [C ~> degC] real :: kg_m2_s_conversion ! A combination of unit conversion factors for rescaling ! mass fluxes [R Z s m2 kg-1 T-1 ~> 1] @@ -343,7 +344,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! Salinity restoring logic if (CS%restore_salt) then - call time_interp_external(CS%id_srestore, Time, data_restore) + call time_interp_external(CS%id_srestore, Time, data_restore, scale=US%ppt_to_S) ! open_ocn_mask indicates where to restore salinity (1 means restore, 0 does not) open_ocn_mask(:,:) = 1.0 if (CS%mask_srestore_under_ice) then ! Do not restore under sea-ice @@ -353,10 +354,10 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, endif if (CS%salt_restore_as_sflux) then do j=js,je ; do i=is,ie - delta_sss = data_restore(i,j)- sfc_state%SSS(i,j) - delta_sss = sign(1.0,delta_sss)*min(abs(delta_sss),CS%max_delta_srestore) - fluxes%salt_flux(i,j) = 1.e-3*G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const_salt)* & - (CS%basin_mask(i,j)*open_ocn_mask(i,j)*CS%srestore_mask(i,j)) *delta_sss ! 
R Z T-1 ~> kg Salt m-2 s-1 + delta_sss = data_restore(i,j) - sfc_state%SSS(i,j) + delta_sss = sign(1.0,delta_sss) * min(abs(delta_sss), CS%max_delta_srestore) + fluxes%salt_flux(i,j) = 1.e-3*US%S_to_ppt*G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const_salt)* & + (CS%basin_mask(i,j)*open_ocn_mask(i,j)*CS%srestore_mask(i,j)) * delta_sss ! R Z T-1 ~> kg Salt m-2 s-1 enddo ; enddo if (CS%adjust_net_srestore_to_zero) then if (CS%adjust_net_srestore_by_scaling) then @@ -376,7 +377,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, do j=js,je ; do i=is,ie if (G%mask2dT(i,j) > 0.0) then delta_sss = sfc_state%SSS(i,j) - data_restore(i,j) - delta_sss = sign(1.0,delta_sss)*min(abs(delta_sss),CS%max_delta_srestore) + delta_sss = sign(1.0,delta_sss) * min(abs(delta_sss), CS%max_delta_srestore) fluxes%vprec(i,j) = (CS%basin_mask(i,j)*open_ocn_mask(i,j)*CS%srestore_mask(i,j))* & (CS%Rho0*CS%Flux_const_salt) * & delta_sss / (0.5*(sfc_state%SSS(i,j) + data_restore(i,j))) @@ -401,20 +402,20 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! SST restoring logic if (CS%restore_temp) then - call time_interp_external(CS%id_trestore, Time, data_restore) + call time_interp_external(CS%id_trestore, Time, data_restore, scale=US%degC_to_C) if ( CS%trestore_SPEAR_ECDA ) then do j=js,je ; do i=is,ie - if (abs(data_restore(i,j)+1.8)<0.0001) then + if (abs(data_restore(i,j)+1.8*US%degC_to_C) < 0.0001*US%degC_to_C) then data_restore(i,j) = CS%SPEAR_dTf_dS*sfc_state%SSS(i,j) endif enddo ; enddo endif do j=js,je ; do i=is,ie - delta_sst = data_restore(i,j)- sfc_state%SST(i,j) - delta_sst = sign(1.0,delta_sst)*min(abs(delta_sst),CS%max_delta_trestore) + delta_sst = data_restore(i,j) - sfc_state%SST(i,j) + delta_sst = sign(1.0,delta_sst) * min(abs(delta_sst), CS%max_delta_trestore) fluxes%heat_added(i,j) = G%mask2dT(i,j) * CS%trestore_mask(i,j) * & - US%degC_to_C*rhoXcp * delta_sst * CS%Flux_const_temp ! [Q R Z T-1 ~> W m-2] + rhoXcp * delta_sst * CS%Flux_const_temp ! [Q R Z T-1 ~> W m-2] enddo ; enddo endif @@ -1404,9 +1405,8 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) "flux instead of as a freshwater flux.", default=.false.) call get_param(param_file, mdl, "MAX_DELTA_SRESTORE", CS%max_delta_srestore, & "The maximum salinity difference used in restoring terms.", & - units="PSU or g kg-1", default=999.0) - call get_param(param_file, mdl, "MASK_SRESTORE_UNDER_ICE", & - CS%mask_srestore_under_ice, & + units="PSU or g kg-1", default=999.0, scale=US%ppt_to_S) + call get_param(param_file, mdl, "MASK_SRESTORE_UNDER_ICE", CS%mask_srestore_under_ice, & "If true, disables SSS restoring under sea-ice based on a frazil "//& "criteria (SST<=Tf). Only used when RESTORE_SALINITY is True.", & default=.false.) @@ -1453,7 +1453,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) call get_param(param_file, mdl, "MAX_DELTA_TRESTORE", CS%max_delta_trestore, & "The maximum sst difference used in restoring terms.", & - units="degC ", default=999.0) + units="degC ", default=999.0, scale=US%degC_to_C) call get_param(param_file, mdl, "MASK_TRESTORE", CS%mask_trestore, & "If true, read a file (temp_restore_mask) containing "//& "a mask for SST restoring.", default=.false.) 
@@ -1466,7 +1466,8 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) endif call get_param(param_file, mdl, "SPEAR_DTFREEZE_DS", CS%SPEAR_dTf_dS, & "The derivative of the freezing temperature with salinity.", & - units="deg C PSU-1", default=-0.054, do_not_log=.not.CS%trestore_SPEAR_ECDA) + units="deg C PSU-1", default=-0.054, scale=US%degC_to_C*US%S_to_ppt, & + do_not_log=.not.CS%trestore_SPEAR_ECDA) ! Optionally read tidal amplitude from input file [Z T-1 ~> m s-1] on model grid. ! Otherwise use default tidal amplitude for bottom frictionally-generated diff --git a/config_src/drivers/FMS_cap/ocean_model_MOM.F90 b/config_src/drivers/FMS_cap/ocean_model_MOM.F90 index 5e1c512e98..b6bb14fc01 100644 --- a/config_src/drivers/FMS_cap/ocean_model_MOM.F90 +++ b/config_src/drivers/FMS_cap/ocean_model_MOM.F90 @@ -840,22 +840,22 @@ subroutine convert_state_to_ocean_type(sfc_state, Ocean_sfc, G, US, patm, press_ if (sfc_state%T_is_conT) then ! Convert the surface T from conservative T to potential T. do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%t_surf(i,j) = gsw_pt_from_ct(sfc_state%SSS(i+i0,j+j0), & - sfc_state%SST(i+i0,j+j0)) + CELSIUS_KELVIN_OFFSET + Ocean_sfc%t_surf(i,j) = gsw_pt_from_ct(US%S_to_ppt*sfc_state%SSS(i+i0,j+j0), & + US%C_to_degC*sfc_state%SST(i+i0,j+j0)) + CELSIUS_KELVIN_OFFSET enddo ; enddo else do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%t_surf(i,j) = sfc_state%SST(i+i0,j+j0) + CELSIUS_KELVIN_OFFSET + Ocean_sfc%t_surf(i,j) = US%C_to_degC*sfc_state%SST(i+i0,j+j0) + CELSIUS_KELVIN_OFFSET enddo ; enddo endif if (sfc_state%S_is_absS) then ! Convert the surface S from absolute salinity to practical salinity. do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%s_surf(i,j) = gsw_sp_from_sr(sfc_state%SSS(i+i0,j+j0)) + Ocean_sfc%s_surf(i,j) = gsw_sp_from_sr(US%S_to_ppt*sfc_state%SSS(i+i0,j+j0)) enddo ; enddo else do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%s_surf(i,j) = sfc_state%SSS(i+i0,j+j0) + Ocean_sfc%s_surf(i,j) = US%S_to_ppt*sfc_state%SSS(i+i0,j+j0) enddo ; enddo endif diff --git a/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 b/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 index f617dfa0f3..c2ee910dbb 100644 --- a/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 +++ b/config_src/drivers/mct_cap/mom_ocean_model_mct.F90 @@ -871,22 +871,22 @@ subroutine convert_state_to_ocean_type(sfc_state, Ocean_sfc, G, US, patm, press_ if (sfc_state%T_is_conT) then ! Convert the surface T from conservative T to potential T. do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%t_surf(i,j) = gsw_pt_from_ct(sfc_state%SSS(i+i0,j+j0), & - sfc_state%SST(i+i0,j+j0)) + CELSIUS_KELVIN_OFFSET + Ocean_sfc%t_surf(i,j) = gsw_pt_from_ct(US%S_to_ppt*sfc_state%SSS(i+i0,j+j0), & + US%C_to_degC*sfc_state%SST(i+i0,j+j0)) + CELSIUS_KELVIN_OFFSET enddo ; enddo else do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%t_surf(i,j) = sfc_state%SST(i+i0,j+j0) + CELSIUS_KELVIN_OFFSET + Ocean_sfc%t_surf(i,j) = US%C_to_degC*sfc_state%SST(i+i0,j+j0) + CELSIUS_KELVIN_OFFSET enddo ; enddo endif if (sfc_state%S_is_absS) then ! Convert the surface S from absolute salinity to practical salinity. 
do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%s_surf(i,j) = gsw_sp_from_sr(sfc_state%SSS(i+i0,j+j0)) + Ocean_sfc%s_surf(i,j) = gsw_sp_from_sr(US%S_to_ppt*sfc_state%SSS(i+i0,j+j0)) enddo ; enddo else do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%s_surf(i,j) = sfc_state%SSS(i+i0,j+j0) + Ocean_sfc%s_surf(i,j) = US%S_to_ppt*sfc_state%SSS(i+i0,j+j0) enddo ; enddo endif diff --git a/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 b/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 index 04cb370628..259aa8a678 100644 --- a/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 +++ b/config_src/drivers/mct_cap/mom_surface_forcing_mct.F90 @@ -113,8 +113,8 @@ module MOM_surface_forcing_mct real :: ice_salt_concentration !< salt concentration for sea ice [kg/kg] logical :: mask_srestore_marginal_seas !< if true, then mask SSS restoring in marginal seas - real :: max_delta_srestore !< maximum delta salinity used for restoring - real :: max_delta_trestore !< maximum delta sst used for restoring + real :: max_delta_srestore !< maximum delta salinity used for restoring [S ~> ppt] + real :: max_delta_trestore !< maximum delta sst used for restoring [C ~> degC] real, pointer, dimension(:,:) :: basin_mask => NULL() !< mask for SSS restoring by basin logical :: fix_ustar_gustless_bug !< If true correct a bug in the time-averaging of the !! gustless wind friction velocity. @@ -218,11 +218,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! local variables real, dimension(SZI_(G),SZJ_(G)) :: & - data_restore, & !< The surface value toward which to restore [g/kg or degC] - SST_anom, & !< Instantaneous sea surface temperature anomalies from a target value [deg C] - SSS_anom, & !< Instantaneous sea surface salinity anomalies from a target value [g/kg] - SSS_mean, & !< A (mean?) salinity about which to normalize local salinity - !! anomalies when calculating restorative precipitation anomalies [g/kg] + data_restore, & !< The surface value toward which to restore [S ~> ppt] or [C ~> degC] PmE_adj, & !< The adjustment to PminusE that will cause the salinity !! to be restored toward its target value [kg/(m^2 * s)] net_FW, & !< The area integrated net freshwater flux into the ocean [kg/s] @@ -239,8 +235,8 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, !! is present, or false (no restoring) otherwise. logical :: restore_sst !< local copy of the argument restore_temp, if it !! is present, or false (no restoring) otherwise. - real :: delta_sss !< temporary storage for sss diff from restoring value - real :: delta_sst !< temporary storage for sst diff from restoring value + real :: delta_sss !< temporary storage for sss diff from restoring value [S ~> ppt] + real :: delta_sst !< temporary storage for sst diff from restoring value [C ~> degC] real :: kg_m2_s_conversion !< A combination of unit conversion factors for rescaling !! mass fluxes [R Z s m2 kg-1 T-1 ~> 1]. @@ -352,19 +348,19 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! Salinity restoring logic if (restore_salinity) then - call time_interp_external(CS%id_srestore,Time,data_restore) + call time_interp_external(CS%id_srestore, Time, data_restore, scale=US%ppt_to_S) ! open_ocn_mask indicates where to restore salinity (1 means restore, 0 does not) open_ocn_mask(:,:) = 1.0 if (CS%mask_srestore_under_ice) then ! 
Do not restore under sea-ice do j=js,je ; do i=is,ie - if (sfc_state%SST(i,j) <= -0.0539*sfc_state%SSS(i,j)) open_ocn_mask(i,j)=0.0 + if (sfc_state%SST(i,j) <= -0.0539*US%degC_to_C*US%S_to_ppt*sfc_state%SSS(i,j)) open_ocn_mask(i,j)=0.0 enddo ; enddo endif if (CS%salt_restore_as_sflux) then do j=js,je ; do i=is,ie - delta_sss = data_restore(i,j)- sfc_state%SSS(i,j) + delta_sss = data_restore(i,j) - sfc_state%SSS(i,j) delta_sss = sign(1.0,delta_sss)*min(abs(delta_sss),CS%max_delta_srestore) - fluxes%salt_flux(i,j) = 1.e-3*G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const)* & + fluxes%salt_flux(i,j) = 1.e-3*US%S_to_ppt*G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const)* & (CS%basin_mask(i,j)*open_ocn_mask(i,j)*CS%srestore_mask(i,j)) *delta_sss ! R Z T-1 ~> kg Salt m-2 s-1 enddo ; enddo if (CS%adjust_net_srestore_to_zero) then @@ -409,12 +405,12 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! SST restoring logic if (restore_sst) then - call time_interp_external(CS%id_trestore,Time,data_restore) + call time_interp_external(CS%id_trestore, Time, data_restore, scale=US%degC_to_C) do j=js,je ; do i=is,ie - delta_sst = data_restore(i,j)- sfc_state%SST(i,j) + delta_sst = data_restore(i,j) - sfc_state%SST(i,j) delta_sst = sign(1.0,delta_sst)*min(abs(delta_sst),CS%max_delta_trestore) fluxes%heat_added(i,j) = G%mask2dT(i,j) * CS%trestore_mask(i,j) * & - (CS%Rho0*US%degC_to_C*fluxes%C_p) * delta_sst * CS%Flux_const ! W m-2 + (CS%Rho0*fluxes%C_p) * delta_sst * CS%Flux_const ! W m-2 enddo ; enddo endif @@ -1143,7 +1139,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, "flux instead of as a freshwater flux.", default=.false.) call get_param(param_file, mdl, "MAX_DELTA_SRESTORE", CS%max_delta_srestore, & "The maximum salinity difference used in restoring terms.", & - units="PSU or g kg-1", default=999.0) + units="PSU or g kg-1", default=999.0, scale=US%ppt_to_S) call get_param(param_file, mdl, "MASK_SRESTORE_UNDER_ICE", & CS%mask_srestore_under_ice, & "If true, disables SSS restoring under sea-ice based on a frazil "//& @@ -1185,7 +1181,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, call get_param(param_file, mdl, "MAX_DELTA_TRESTORE", CS%max_delta_trestore, & "The maximum sst difference used in restoring terms.", & - units="degC ", default=999.0) + units="degC ", default=999.0, scale=US%degC_to_C) call get_param(param_file, mdl, "MASK_TRESTORE", CS%mask_trestore, & "If true, read a file (temp_restore_mask) containing "//& diff --git a/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 b/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 index a51767946c..1fb35b31a6 100644 --- a/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 +++ b/config_src/drivers/nuopc_cap/mom_ocean_model_nuopc.F90 @@ -916,22 +916,22 @@ subroutine convert_state_to_ocean_type(sfc_state, Ocean_sfc, G, US, patm, press_ if (sfc_state%T_is_conT) then ! Convert the surface T from conservative T to potential T. 
do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%t_surf(i,j) = gsw_pt_from_ct(sfc_state%SSS(i+i0,j+j0), & - sfc_state%SST(i+i0,j+j0)) + CELSIUS_KELVIN_OFFSET + Ocean_sfc%t_surf(i,j) = gsw_pt_from_ct(US%S_to_ppt*sfc_state%SSS(i+i0,j+j0), & + US%C_to_degC*sfc_state%SST(i+i0,j+j0)) + CELSIUS_KELVIN_OFFSET enddo ; enddo else do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%t_surf(i,j) = sfc_state%SST(i+i0,j+j0) + CELSIUS_KELVIN_OFFSET + Ocean_sfc%t_surf(i,j) = US%C_to_degC*sfc_state%SST(i+i0,j+j0) + CELSIUS_KELVIN_OFFSET enddo ; enddo endif if (sfc_state%S_is_absS) then ! Convert the surface S from absolute salinity to practical salinity. do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%s_surf(i,j) = gsw_sp_from_sr(sfc_state%SSS(i+i0,j+j0)) + Ocean_sfc%s_surf(i,j) = gsw_sp_from_sr(US%S_to_ppt*sfc_state%SSS(i+i0,j+j0)) enddo ; enddo else do j=jsc_bnd,jec_bnd ; do i=isc_bnd,iec_bnd - Ocean_sfc%s_surf(i,j) = sfc_state%SSS(i+i0,j+j0) + Ocean_sfc%s_surf(i,j) = US%S_to_ppt*sfc_state%SSS(i+i0,j+j0) enddo ; enddo endif diff --git a/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 index 0e0e384e57..7e08f83530 100644 --- a/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 +++ b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 @@ -121,8 +121,8 @@ module MOM_surface_forcing_nuopc !! criteria for salinity restoring. real :: ice_salt_concentration !< salt concentration for sea ice [kg/kg] logical :: mask_srestore_marginal_seas !< if true, then mask SSS restoring in marginal seas - real :: max_delta_srestore !< maximum delta salinity used for restoring - real :: max_delta_trestore !< maximum delta sst used for restoring + real :: max_delta_srestore !< maximum delta salinity used for restoring [S ~> ppt] + real :: max_delta_trestore !< maximum delta sst used for restoring [C ~> degC] real, pointer, dimension(:,:) :: basin_mask => NULL() !< mask for SSS restoring by basin logical :: fix_ustar_gustless_bug !< If true correct a bug in the time-averaging of the !! gustless wind friction velocity. @@ -247,11 +247,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, real, dimension(SZI_(G),SZJ_(G)) :: & cfc11_atm, & !< CFC11 concentration in the atmopshere [???????] cfc12_atm, & !< CFC11 concentration in the atmopshere [???????] - data_restore, & !< The surface value toward which to restore [g/kg or degC] - SST_anom, & !< Instantaneous sea surface temperature anomalies from a target value [deg C] - SSS_anom, & !< Instantaneous sea surface salinity anomalies from a target value [g/kg] - SSS_mean, & !< A (mean?) salinity about which to normalize local salinity - !! anomalies when calculating restorative precipitation anomalies [g/kg] + data_restore, & !< The surface value toward which to restore [S ~> ppt] or [C ~> degC] PmE_adj, & !< The adjustment to PminusE that will cause the salinity !! to be restored toward its target value [kg/(m^2 * s)] net_FW, & !< The area integrated net freshwater flux into the ocean [kg/s] @@ -268,8 +264,8 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, !! is present, or false (no restoring) otherwise. logical :: restore_sst !< local copy of the argument restore_temp, if it !! is present, or false (no restoring) otherwise. 
- real :: delta_sss !< temporary storage for sss diff from restoring value - real :: delta_sst !< temporary storage for sst diff from restoring value + real :: delta_sss !< temporary storage for sss diff from restoring value [S ~> ppt] + real :: delta_sst !< temporary storage for sst diff from restoring value [C ~> degC] real :: kg_m2_s_conversion !< A combination of unit conversion factors for rescaling !! mass fluxes [R Z s m2 kg-1 T-1 ~> 1]. @@ -381,19 +377,19 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! Salinity restoring logic if (restore_salinity) then - call time_interp_external(CS%id_srestore,Time,data_restore) + call time_interp_external(CS%id_srestore, Time, data_restore, scale=US%ppt_to_S) ! open_ocn_mask indicates where to restore salinity (1 means restore, 0 does not) open_ocn_mask(:,:) = 1.0 if (CS%mask_srestore_under_ice) then ! Do not restore under sea-ice do j=js,je ; do i=is,ie - if (sfc_state%SST(i,j) <= -0.0539*sfc_state%SSS(i,j)) open_ocn_mask(i,j)=0.0 + if (sfc_state%SST(i,j) <= -0.0539*US%degC_to_C*US%S_to_ppt*sfc_state%SSS(i,j)) open_ocn_mask(i,j)=0.0 enddo ; enddo endif if (CS%salt_restore_as_sflux) then do j=js,je ; do i=is,ie - delta_sss = data_restore(i,j)- sfc_state%SSS(i,j) + delta_sss = data_restore(i,j) - sfc_state%SSS(i,j) delta_sss = sign(1.0,delta_sss)*min(abs(delta_sss),CS%max_delta_srestore) - fluxes%salt_flux(i,j) = 1.e-3*G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const)* & + fluxes%salt_flux(i,j) = 1.e-3*US%S_to_ppt*G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const)* & (CS%basin_mask(i,j)*open_ocn_mask(i,j)*CS%srestore_mask(i,j)) *delta_sss ! kg Salt m-2 s-1 enddo ; enddo if (CS%adjust_net_srestore_to_zero) then @@ -438,12 +434,12 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, ! SST restoring logic if (restore_sst) then - call time_interp_external(CS%id_trestore,Time,data_restore) + call time_interp_external(CS%id_trestore, Time, data_restore, scale=US%degC_to_C) do j=js,je ; do i=is,ie - delta_sst = data_restore(i,j)- sfc_state%SST(i,j) + delta_sst = data_restore(i,j) - sfc_state%SST(i,j) delta_sst = sign(1.0,delta_sst)*min(abs(delta_sst),CS%max_delta_trestore) fluxes%heat_added(i,j) = G%mask2dT(i,j) * CS%trestore_mask(i,j) * & - (CS%Rho0*US%degC_to_C*fluxes%C_p) * delta_sst * CS%Flux_const ! Q R Z T-1 ~> W m-2 + (CS%Rho0*fluxes%C_p) * delta_sst * CS%Flux_const ! Q R Z T-1 ~> W m-2 enddo ; enddo endif @@ -671,13 +667,13 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) ! local variables real, dimension(SZIB_(G),SZJB_(G)) :: & - taux_at_q, & !< Zonal wind stresses at q points [Pa] - tauy_at_q !< Meridional wind stresses at q points [Pa] + taux_at_q, & !< Zonal wind stresses at q points [R Z L T-2 ~> Pa] + tauy_at_q !< Meridional wind stresses at q points [R Z L T-2 ~> Pa] real, dimension(SZI_(G),SZJ_(G)) :: & rigidity_at_h, & !< Ice rigidity at tracer points [L4 Z-1 T-1 ~> m3 s-1] - taux_at_h, & !< Zonal wind stresses at h points [Pa] - tauy_at_h !< Meridional wind stresses at h points [Pa] + taux_at_h, & !< Zonal wind stresses at h points [R Z L T-2 ~> Pa] + tauy_at_h !< Meridional wind stresses at h points [R Z L T-2 ~> Pa] real :: gustiness !< unresolved gustiness that contributes to ustar [R Z L T-2 ~> Pa] real :: Irho0 !< inverse of the mean density in [Z L-1 R-1 ~> m3 kg-1] @@ -1239,7 +1235,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, "flux instead of as a freshwater flux.", default=.false.) 
call get_param(param_file, mdl, "MAX_DELTA_SRESTORE", CS%max_delta_srestore, & "The maximum salinity difference used in restoring terms.", & - units="PSU or g kg-1", default=999.0) + units="PSU or g kg-1", default=999.0, scale=US%ppt_to_S) call get_param(param_file, mdl, "MASK_SRESTORE_UNDER_ICE", & CS%mask_srestore_under_ice, & "If true, disables SSS restoring under sea-ice based on a frazil "//& @@ -1281,7 +1277,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, restore_salt, call get_param(param_file, mdl, "MAX_DELTA_TRESTORE", CS%max_delta_trestore, & "The maximum sst difference used in restoring terms.", & - units="degC ", default=999.0) + units="degC ", default=999.0, scale=US%degC_to_C) call get_param(param_file, mdl, "MASK_TRESTORE", CS%mask_trestore, & "If true, read a file (temp_restore_mask) containing "//& "a mask for SST restoring.", default=.false.) diff --git a/config_src/drivers/solo_driver/MESO_surface_forcing.F90 b/config_src/drivers/solo_driver/MESO_surface_forcing.F90 index 43b4af6c55..18c3c33fdb 100644 --- a/config_src/drivers/solo_driver/MESO_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/MESO_surface_forcing.F90 @@ -33,8 +33,8 @@ module MESO_surface_forcing real :: gust_const !< A constant unresolved background gustiness !! that contributes to ustar [R L Z T-1 ~> Pa] real, dimension(:,:), pointer :: & - T_Restore(:,:) => NULL(), & !< The temperature to restore the SST toward [degC]. - S_Restore(:,:) => NULL(), & !< The salinity to restore the sea surface salnity toward [ppt] + T_Restore(:,:) => NULL(), & !< The temperature to restore the SST toward [C ~> degC]. + S_Restore(:,:) => NULL(), & !< The salinity to restore the sea surface salnity toward [S ~> ppt] PmE(:,:) => NULL(), & !< The prescribed precip minus evap [Z T-1 ~> m s-1]. Solar(:,:) => NULL() !< The shortwave forcing into the ocean [Q R Z T-1 ~> W m-2]. real, dimension(:,:), pointer :: Heat(:,:) => NULL() !< The prescribed longwave, latent and sensible @@ -120,9 +120,9 @@ subroutine MESO_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) call safe_alloc_ptr(CS%Solar, isd, ied, jsd, jed) call MOM_read_data(trim(CS%inputdir)//trim(CS%SSTrestore_file), "SST", & - CS%T_Restore(:,:), G%Domain) + CS%T_Restore(:,:), G%Domain, scale=US%degC_to_C) call MOM_read_data(trim(CS%inputdir)//trim(CS%salinityrestore_file), "SAL", & - CS%S_Restore(:,:), G%Domain) + CS%S_Restore(:,:), G%Domain, scale=US%ppt_to_S) call MOM_read_data(trim(CS%inputdir)//trim(CS%heating_file), "Heat", & CS%Heat(:,:), G%Domain, scale=US%W_m2_to_QRZ_T) call MOM_read_data(trim(CS%inputdir)//trim(CS%PmE_file), "PmE", & @@ -172,7 +172,7 @@ subroutine MESO_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) ! salinity (in ppt or PSU) that are being restored toward. 
if (G%mask2dT(i,j) > 0.0) then fluxes%heat_added(i,j) = G%mask2dT(i,j) * & - ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * US%degC_to_C*rhoXcp * CS%Flux_const) + ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * rhoXcp * CS%Flux_const) fluxes%vprec(i,j) = - (CS%Rho0 * CS%Flux_const) * & (CS%S_Restore(i,j) - sfc_state%SSS(i,j)) / & (0.5*(sfc_state%SSS(i,j) + CS%S_Restore(i,j))) diff --git a/config_src/drivers/solo_driver/MOM_surface_forcing.F90 b/config_src/drivers/solo_driver/MOM_surface_forcing.F90 index 617e7993bb..58865888ca 100644 --- a/config_src/drivers/solo_driver/MOM_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/MOM_surface_forcing.F90 @@ -93,8 +93,8 @@ module MOM_surface_forcing real, pointer :: gust(:,:) => NULL() !< spatially varying unresolved background gustiness [R L Z T-1 ~> Pa] !! gust is used when read_gust_2d is true. - real, pointer :: T_Restore(:,:) => NULL() !< temperature to damp (restore) the SST to [degC] - real, pointer :: S_Restore(:,:) => NULL() !< salinity to damp (restore) the SSS [ppt] + real, pointer :: T_Restore(:,:) => NULL() !< temperature to damp (restore) the SST to [C ~> degC] + real, pointer :: S_Restore(:,:) => NULL() !< salinity to damp (restore) the SSS [S ~> ppt] real, pointer :: Dens_Restore(:,:) => NULL() !< density to damp (restore) surface density [R ~> kg m-3] integer :: buoy_last_lev_read = -1 !< The last time level read from buoyancy input files @@ -115,10 +115,10 @@ module MOM_surface_forcing real :: scurves_ydata(20) = 90. !< Latitudes of scurve nodes [degreesN] real :: scurves_taux(20) = 0. !< Zonal wind stress values at scurve nodes [R L Z T-1 ~> Pa] - real :: T_north !< Target temperatures at north used in buoyancy_forcing_linear [degC] - real :: T_south !< Target temperatures at south used in buoyancy_forcing_linear [degC] - real :: S_north !< Target salinity at north used in buoyancy_forcing_linear [ppt] - real :: S_south !< Target salinity at south used in buoyancy_forcing_linear [ppt] + real :: T_north !< Target temperatures at north used in buoyancy_forcing_linear [C ~> degC] + real :: T_south !< Target temperatures at south used in buoyancy_forcing_linear [C ~> degC] + real :: S_north !< Target salinity at north used in buoyancy_forcing_linear [S ~> ppt] + real :: S_south !< Target salinity at south used in buoyancy_forcing_linear [S ~> ppt] logical :: first_call_set_forcing = .true. !< True until after the first call to set_forcing logical :: archaic_OMIP_file = .true. !< If true use the variable names and data fields from @@ -910,11 +910,11 @@ subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) ! [R Z T-1 ~> kg m-2 s-1] !#CTRL# real, dimension(SZI_(G),SZJ_(G)) :: & !#CTRL# SST_anom, & ! Instantaneous sea surface temperature anomalies from a -!#CTRL# ! target (observed) value [degC]. +!#CTRL# ! target (observed) value [C ~> degC]. !#CTRL# SSS_anom, & ! Instantaneous sea surface salinity anomalies from a target -!#CTRL# ! (observed) value [ppt]. +!#CTRL# ! (observed) value [S ~> ppt]. !#CTRL# SSS_mean ! A (mean?) salinity about which to normalize local salinity -!#CTRL# ! anomalies when calculating restorative precipitation anomalies [ppt]. +!#CTRL# ! anomalies when calculating restorative precipitation anomalies [S ~> ppt]. real :: rhoXcp ! 
reference density times heat capacity [Q R C-1 ~> J m-3 degC-1] @@ -1081,7 +1081,7 @@ subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) case default ; time_lev = 1 end select call MOM_read_data(CS%SSTrestore_file, CS%SST_restore_var, & - CS%T_Restore(:,:), G%Domain, timelevel=time_lev) + CS%T_Restore(:,:), G%Domain, timelevel=time_lev, scale=US%degC_to_C) CS%SST_last_lev = time_lev select case (CS%SSS_nlev) @@ -1090,7 +1090,7 @@ subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) case default ; time_lev = 1 end select call MOM_read_data(CS%salinityrestore_file, CS%SSS_restore_var, & - CS%S_Restore(:,:), G%Domain, timelevel=time_lev) + CS%S_Restore(:,:), G%Domain, timelevel=time_lev, scale=US%ppt_to_S) CS%SSS_last_lev = time_lev endif CS%buoy_last_lev_read = time_lev_daily @@ -1127,7 +1127,7 @@ subroutine buoyancy_forcing_from_files(sfc_state, fluxes, day, dt, G, US, CS) do j=js,je ; do i=is,ie if (G%mask2dT(i,j) > 0.0) then fluxes%heat_added(i,j) = G%mask2dT(i,j) * & - ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * US%degC_to_C*rhoXcp * CS%Flux_const_T) + ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * rhoXcp * CS%Flux_const_T) fluxes%vprec(i,j) = - (CS%Rho0*CS%Flux_const_S) * & (CS%S_Restore(i,j) - sfc_state%SSS(i,j)) / & (0.5*(sfc_state%SSS(i,j) + CS%S_Restore(i,j))) @@ -1183,11 +1183,11 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US ! Local variables !#CTRL# real, dimension(SZI_(G),SZJ_(G)) :: & !#CTRL# SST_anom, & ! Instantaneous sea surface temperature anomalies from a -!#CTRL# ! target (observed) value [degC]. +!#CTRL# ! target (observed) value [C ~> degC]. !#CTRL# SSS_anom, & ! Instantaneous sea surface salinity anomalies from a target -!#CTRL# ! (observed) value [ppt]. +!#CTRL# ! (observed) value [S ~> ppt]. !#CTRL# SSS_mean ! A (mean?) salinity about which to normalize local salinity -!#CTRL# ! anomalies when calculating restorative precipitation anomalies [ppt]. +!#CTRL# ! anomalies when calculating restorative precipitation anomalies [S ~> ppt]. real :: rhoXcp ! The mean density times the heat capacity [Q R C-1 ~> J m-3 degC-1]. integer :: i, j, is, ie, js, je, isd, ied, jsd, jed @@ -1223,8 +1223,8 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US ! Read the SST and SSS fields for damping. if (CS%restorebuoy) then !#CTRL# .or. associated(CS%ctrl_forcing_CSp)) then - call data_override(G%Domain, 'SST_restore', CS%T_restore, day) - call data_override(G%Domain, 'SSS_restore', CS%S_restore, day) + call data_override(G%Domain, 'SST_restore', CS%T_restore, day, scale=US%degC_to_C) + call data_override(G%Domain, 'SSS_restore', CS%S_restore, day, scale=US%ppt_to_S) endif ! restoring boundary fluxes @@ -1233,7 +1233,7 @@ subroutine buoyancy_forcing_from_data_override(sfc_state, fluxes, day, dt, G, US do j=js,je ; do i=is,ie if (G%mask2dT(i,j) > 0.0) then fluxes%heat_added(i,j) = G%mask2dT(i,j) * & - ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * US%degC_to_C*rhoXcp * CS%Flux_const_T) + ((CS%T_Restore(i,j) - sfc_state%SST(i,j)) * rhoXcp * CS%Flux_const_T) fluxes%vprec(i,j) = - (CS%Rho0*CS%Flux_const_S) * & (CS%S_Restore(i,j) - sfc_state%SSS(i,j)) / & (0.5*(sfc_state%SSS(i,j) + CS%S_Restore(i,j))) @@ -1395,8 +1395,8 @@ subroutine buoyancy_forcing_linear(sfc_state, fluxes, day, dt, G, US, CS) !! a previous surface_forcing_init call ! Local variables real :: y ! The latitude relative to the south normalized by the domain extent [nondim] - real :: T_restore ! 
The temperature towards which to restore [degC] - real :: S_restore ! The salinity towards which to restore [ppt] + real :: T_restore ! The temperature towards which to restore [C ~> degC] + real :: S_restore ! The salinity towards which to restore [S ~> ppt] integer :: i, j, is, ie, js, je call callTree_enter("buoyancy_forcing_linear, MOM_surface_forcing.F90") @@ -1433,7 +1433,7 @@ subroutine buoyancy_forcing_linear(sfc_state, fluxes, day, dt, G, US, CS) S_restore = CS%S_south + (CS%S_north-CS%S_south)*y if (G%mask2dT(i,j) > 0.0) then fluxes%heat_added(i,j) = G%mask2dT(i,j) * & - ((T_Restore - sfc_state%SST(i,j)) * ((CS%Rho0 * US%degC_to_C*fluxes%C_p) * CS%Flux_const)) + ((T_Restore - sfc_state%SST(i,j)) * ((CS%Rho0 * fluxes%C_p) * CS%Flux_const)) fluxes%vprec(i,j) = - (CS%Rho0*CS%Flux_const) * & (S_Restore - sfc_state%SSS(i,j)) / & (0.5*(sfc_state%SSS(i,j) + S_Restore)) @@ -1807,19 +1807,19 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, tracer_flow_C call get_param(param_file, mdl, "SST_NORTH", CS%T_north, & "With buoy_config linear, the sea surface temperature "//& "at the northern end of the domain toward which to "//& - "to restore.", units="deg C", default=0.0) + "to restore.", units="deg C", default=0.0, scale=US%degC_to_C) call get_param(param_file, mdl, "SST_SOUTH", CS%T_south, & "With buoy_config linear, the sea surface temperature "//& "at the southern end of the domain toward which to "//& - "to restore.", units="deg C", default=0.0) + "to restore.", units="deg C", default=0.0, scale=US%degC_to_C) call get_param(param_file, mdl, "SSS_NORTH", CS%S_north, & "With buoy_config linear, the sea surface salinity "//& "at the northern end of the domain toward which to "//& - "to restore.", units="PSU", default=35.0) + "to restore.", units="PSU", default=35.0, scale=US%ppt_to_S) call get_param(param_file, mdl, "SSS_SOUTH", CS%S_south, & "With buoy_config linear, the sea surface salinity "//& "at the southern end of the domain toward which to "//& - "to restore.", units="PSU", default=35.0) + "to restore.", units="PSU", default=35.0, scale=US%ppt_to_S) endif endif call get_param(param_file, mdl, "G_EARTH", CS%G_Earth, & diff --git a/config_src/drivers/solo_driver/user_surface_forcing.F90 b/config_src/drivers/solo_driver/user_surface_forcing.F90 index eb1f78e3da..ae3f854335 100644 --- a/config_src/drivers/solo_driver/user_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/user_surface_forcing.F90 @@ -125,13 +125,13 @@ subroutine USER_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) ! fluxes are in [R Z T-1 ~> kg m-2 s-1] and positive for water moving into the ocean. ! Local variables - real :: Temp_restore ! The temperature that is being restored toward [degC]. - real :: Salin_restore ! The salinity that is being restored toward [ppt] + real :: Temp_restore ! The temperature that is being restored toward [C ~> degC]. + real :: Salin_restore ! The salinity that is being restored toward [S ~> ppt] real :: density_restore ! The potential density that is being restored ! toward [R ~> kg m-3]. real :: rhoXcp ! The mean density times the heat capacity [Q R C-1 ~> J m-3 degC-1]. real :: buoy_rest_const ! A constant relating density anomalies to the - ! restoring buoyancy flux [L2 m3 T-3 kg-1 ~> m5 s-3 kg-1]. + ! restoring buoyancy flux [L2 T-3 R-1 ~> m5 s-3 kg-1]. 
integer :: i, j, is, ie, js, je integer :: isd, ied, jsd, jed @@ -201,12 +201,12 @@ subroutine USER_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) rhoXcp = CS%Rho0 * fluxes%C_p do j=js,je ; do i=is,ie - ! Set Temp_restore and Salin_restore to the temperature (in degC) and - ! salinity (in PSU or ppt) that are being restored toward. + ! Set Temp_restore and Salin_restore to the temperature (in [C ~> degC]) and + ! salinity (in [S ~> ppt]) that are being restored toward. Temp_restore = 0.0 Salin_restore = 0.0 - fluxes%heat_added(i,j) = (G%mask2dT(i,j) * (US%degC_to_C*rhoXcp * CS%Flux_const)) * & + fluxes%heat_added(i,j) = (G%mask2dT(i,j) * (rhoXcp * CS%Flux_const)) * & (Temp_restore - sfc_state%SST(i,j)) fluxes%vprec(i,j) = - (G%mask2dT(i,j) * (CS%Rho0*CS%Flux_const)) * & ((Salin_restore - sfc_state%SSS(i,j)) / (0.5 * (Salin_restore + sfc_state%SSS(i,j)))) diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index f70fdae78f..e8e95ea560 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -329,9 +329,9 @@ module MOM !! if a bulk mixed layer is being used. logical :: check_bad_sfc_vals !< If true, scan surface state for ridiculous values. real :: bad_val_ssh_max !< Maximum SSH before triggering bad value message [Z ~> m] - real :: bad_val_sst_max !< Maximum SST before triggering bad value message [degC] - real :: bad_val_sst_min !< Minimum SST before triggering bad value message [degC] - real :: bad_val_sss_max !< Maximum SSS before triggering bad value message [ppt] + real :: bad_val_sst_max !< Maximum SST before triggering bad value message [C ~> degC] + real :: bad_val_sst_min !< Minimum SST before triggering bad value message [C ~> degC] + real :: bad_val_sss_max !< Maximum SSS before triggering bad value message [S ~> ppt] real :: bad_val_col_thick !< Minimum column thickness before triggering bad value message [Z ~> m] logical :: answers_2018 !< If true, use expressions for the surface properties that recover !! the answers from the end of 2018. Otherwise, use more appropriate @@ -2132,16 +2132,16 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & units="m", default=20.0, scale=US%m_to_Z) call get_param(param_file, "MOM", "BAD_VAL_SSS_MAX", CS%bad_val_sss_max, & "The value of SSS above which a bad value message is "//& - "triggered, if CHECK_BAD_SURFACE_VALS is true.", units="PPT", & - default=45.0) + "triggered, if CHECK_BAD_SURFACE_VALS is true.", & + units="PPT", default=45.0, scale=US%ppt_to_S) call get_param(param_file, "MOM", "BAD_VAL_SST_MAX", CS%bad_val_sst_max, & "The value of SST above which a bad value message is "//& "triggered, if CHECK_BAD_SURFACE_VALS is true.", & - units="deg C", default=45.0) + units="deg C", default=45.0, scale=US%degC_to_C) call get_param(param_file, "MOM", "BAD_VAL_SST_MIN", CS%bad_val_sst_min, & "The value of SST below which a bad value message is "//& "triggered, if CHECK_BAD_SURFACE_VALS is true.", & - units="deg C", default=-2.1) + units="deg C", default=-2.1, scale=US%degC_to_C) call get_param(param_file, "MOM", "BAD_VAL_COLUMN_THICKNESS", CS%bad_val_col_thick, & "The value of column thickness below which a bad value message is "//& "triggered, if CHECK_BAD_SURFACE_VALS is true.", & @@ -3332,8 +3332,8 @@ subroutine extract_surface_state(CS, sfc_state_in) if (CS%Hmix < 0.0) then ! 
A bulk mixed layer is in use, so layer 1 has the properties if (use_temperature) then ; do j=js,je ; do i=is,ie - sfc_state%SST(i,j) = US%C_to_degC*CS%tv%T(i,j,1) - sfc_state%SSS(i,j) = US%S_to_ppt*CS%tv%S(i,j,1) + sfc_state%SST(i,j) = CS%tv%T(i,j,1) + sfc_state%SSS(i,j) = CS%tv%S(i,j,1) enddo ; enddo ; endif do j=js,je ; do I=is-1,ie sfc_state%u(I,j) = CS%u(I,j,1) @@ -3368,8 +3368,8 @@ subroutine extract_surface_state(CS, sfc_state_in) dh = 0.0 endif if (use_temperature) then - sfc_state%SST(i,j) = sfc_state%SST(i,j) + dh * US%C_to_degC*CS%tv%T(i,j,k) - sfc_state%SSS(i,j) = sfc_state%SSS(i,j) + dh * US%S_to_ppt*CS%tv%S(i,j,k) + sfc_state%SST(i,j) = sfc_state%SST(i,j) + dh * CS%tv%T(i,j,k) + sfc_state%SSS(i,j) = sfc_state%SSS(i,j) + dh * CS%tv%S(i,j,k) else sfc_state%sfc_density(i,j) = sfc_state%sfc_density(i,j) + dh * GV%Rlay(k) endif @@ -3391,8 +3391,8 @@ subroutine extract_surface_state(CS, sfc_state_in) I_depth = 1.0 / (GV%H_subroundoff*H_rescale) missing_depth = GV%H_subroundoff*H_rescale - depth(i) if (use_temperature) then - sfc_state%SST(i,j) = (sfc_state%SST(i,j) + missing_depth*US%C_to_degC*CS%tv%T(i,j,1)) * I_depth - sfc_state%SSS(i,j) = (sfc_state%SSS(i,j) + missing_depth*US%S_to_ppt*CS%tv%S(i,j,1)) * I_depth + sfc_state%SST(i,j) = (sfc_state%SST(i,j) + missing_depth*CS%tv%T(i,j,1)) * I_depth + sfc_state%SSS(i,j) = (sfc_state%SSS(i,j) + missing_depth*CS%tv%S(i,j,1)) * I_depth else sfc_state%sfc_density(i,j) = (sfc_state%sfc_density(i,j) + & missing_depth*GV%Rlay(1)) * I_depth @@ -3562,7 +3562,7 @@ subroutine extract_surface_state(CS, sfc_state_in) !$OMP parallel do default(shared) private(mass) do j=js,je ; do k=1,nz ; do i=is,ie mass = GV%H_to_RZ*h(i,j,k) - sfc_state%ocean_heat(i,j) = sfc_state%ocean_heat(i,j) + mass*CS%tv%T(i,j,k) + sfc_state%ocean_heat(i,j) = sfc_state%ocean_heat(i,j) + mass * CS%tv%T(i,j,k) enddo ; enddo ; enddo endif if (allocated(sfc_state%ocean_salt)) then @@ -3577,7 +3577,7 @@ subroutine extract_surface_state(CS, sfc_state_in) endif if (associated(CS%tracer_flow_CSp)) then - call call_tracer_surface_state(sfc_state, h, G, GV, CS%tracer_flow_CSp) + call call_tracer_surface_state(sfc_state, h, G, GV, US, CS%tracer_flow_CSp) endif if (CS%check_bad_sfc_vals) then @@ -3604,7 +3604,7 @@ subroutine extract_surface_state(CS, sfc_state_in) 'lon=',G%geoLonT(i,j), 'lat=',G%geoLatT(i,j), & 'x=',G%gridLonT(ig), 'y=',G%gridLatT(jg), & 'D=',US%Z_to_m*(G%bathyT(i,j)+G%Z_ref), 'SSH=',US%Z_to_m*sfc_state%sea_lev(i,j), & - 'SST=',sfc_state%SST(i,j), 'SSS=',sfc_state%SSS(i,j), & + 'SST=',US%C_to_degC*sfc_state%SST(i,j), 'SSS=',US%S_to_ppt*sfc_state%SSS(i,j), & 'U-=',US%L_T_to_m_s*sfc_state%u(I-1,j), 'U+=',US%L_T_to_m_s*sfc_state%u(I,j), & 'V-=',US%L_T_to_m_s*sfc_state%v(i,J-1), 'V+=',US%L_T_to_m_s*sfc_state%v(i,J) else diff --git a/src/core/MOM_checksum_packages.F90 b/src/core/MOM_checksum_packages.F90 index 2f091cae08..aa080e1e8e 100644 --- a/src/core/MOM_checksum_packages.F90 +++ b/src/core/MOM_checksum_packages.F90 @@ -153,8 +153,10 @@ subroutine MOM_surface_chksum(mesg, sfc_state, G, US, haloshift, symmetric) sym = .false. 
; if (present(symmetric)) sym = symmetric hs = 1 ; if (present(haloshift)) hs = haloshift - if (allocated(sfc_state%SST)) call hchksum(sfc_state%SST, mesg//" SST", G%HI, haloshift=hs) - if (allocated(sfc_state%SSS)) call hchksum(sfc_state%SSS, mesg//" SSS", G%HI, haloshift=hs) + if (allocated(sfc_state%SST)) call hchksum(sfc_state%SST, mesg//" SST", G%HI, haloshift=hs, & + scale=US%C_to_degC) + if (allocated(sfc_state%SSS)) call hchksum(sfc_state%SSS, mesg//" SSS", G%HI, haloshift=hs, & + scale=US%S_to_ppt) if (allocated(sfc_state%sea_lev)) call hchksum(sfc_state%sea_lev, mesg//" sea_lev", G%HI, & haloshift=hs, scale=US%Z_to_m) if (allocated(sfc_state%Hml)) call hchksum(sfc_state%Hml, mesg//" Hml", G%HI, haloshift=hs, & diff --git a/src/core/MOM_variables.F90 b/src/core/MOM_variables.F90 index f877f781d5..a6f9d79fe6 100644 --- a/src/core/MOM_variables.F90 +++ b/src/core/MOM_variables.F90 @@ -41,8 +41,8 @@ module MOM_variables !! will be returned to the calling program type, public :: surface real, allocatable, dimension(:,:) :: & - SST, & !< The sea surface temperature [degC]. - SSS, & !< The sea surface salinity [ppt ~> psu or gSalt/kg]. + SST, & !< The sea surface temperature [C ~> degC]. + SSS, & !< The sea surface salinity [S ~> psu or gSalt/kg]. sfc_density, & !< The mixed layer density [R ~> kg m-3]. sfc_cfc11, & !< Sea surface concentration of CFC11 [mol kg-1]. sfc_cfc12, & !< Sea surface concentration of CFC12 [mol kg-1]. @@ -61,9 +61,9 @@ module MOM_variables taux_shelf, & !< The zonal stresses on the ocean under shelves [R L Z T-2 ~> Pa]. tauy_shelf !< The meridional stresses on the ocean under shelves [R L Z T-2 ~> Pa]. logical :: T_is_conT = .false. !< If true, the temperature variable SST is actually the - !! conservative temperature in [degC]. + !! conservative temperature in [C ~> degC]. logical :: S_is_absS = .false. !< If true, the salinity variable SSS is actually the - !! absolute salinity in [gSalt kg-1]. + !! absolute salinity in [S ~> gSalt kg-1]. type(coupler_2d_bc_type) :: tr_fields !< A structure that may contain an !! array of named fields describing tracer-related quantities. !### NOTE: ALL OF THE ARRAYS IN TR_FIELDS USE THE COUPLER'S INDEXING CONVENTION AND HAVE NO diff --git a/src/diagnostics/MOM_diagnostics.F90 b/src/diagnostics/MOM_diagnostics.F90 index 7390db2b92..d6df58a39b 100644 --- a/src/diagnostics/MOM_diagnostics.F90 +++ b/src/diagnostics/MOM_diagnostics.F90 @@ -1390,7 +1390,7 @@ subroutine post_surface_thermo_diags(IDs, G, GV, US, diag, dt_int, sfc_state, tv ! Use TEOS-10 function calls convert T&S diagnostics from conservative temp ! to potential temperature. do j=js,je ; do i=is,ie - work_2d(i,j) = gsw_pt_from_ct(sfc_state%SSS(i,j), sfc_state%SST(i,j)) + work_2d(i,j) = US%degC_to_C*gsw_pt_from_ct(US%S_to_ppt*sfc_state%SSS(i,j), US%C_to_degC*sfc_state%SST(i,j)) enddo ; enddo if (IDs%id_sst > 0) call post_data(IDs%id_sst, work_2d, diag, mask=G%mask2dT) else @@ -1404,7 +1404,7 @@ subroutine post_surface_thermo_diags(IDs, G, GV, US, diag, dt_int, sfc_state, tv ! Use TEOS-10 function calls convert T&S diagnostics from absolute salinity ! to practical salinity. 
do j=js,je ; do i=is,ie - work_2d(i,j) = gsw_sp_from_sr(sfc_state%SSS(i,j)) + work_2d(i,j) = US%ppt_to_S*gsw_sp_from_sr(US%S_to_ppt*sfc_state%SSS(i,j)) enddo ; enddo if (IDs%id_sss > 0) call post_data(IDs%id_sss, work_2d, diag, mask=G%mask2dT) else @@ -1633,11 +1633,11 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag units='psu', conversion=US%S_to_ppt) CS%id_tosq = register_diag_field('ocean_model', 'tosq', diag%axesTL, & - Time, 'Square of Potential Temperature', 'degC2', conversion=US%C_to_degC**2, & - standard_name='Potential Temperature Squared') + Time, 'Square of Potential Temperature', 'degC2', conversion=US%C_to_degC**2, & + standard_name='Potential Temperature Squared') CS%id_sosq = register_diag_field('ocean_model', 'sosq', diag%axesTL, & - Time, 'Square of Salinity', 'psu2', conversion=US%S_to_ppt**2, & - standard_name='Salinity Squared') + Time, 'Square of Salinity', 'psu2', conversion=US%S_to_ppt**2, & + standard_name='Salinity Squared') CS%id_temp_layer_ave = register_diag_field('ocean_model', 'temp_layer_ave', & diag%axesZL, Time, 'Layer Average Ocean Temperature', 'degC') @@ -1645,7 +1645,7 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag diag%axesZL, Time, 'Layer Average Ocean Salinity', 'psu') CS%id_thetaoga = register_scalar_field('ocean_model', 'thetaoga', & - Time, diag, 'Global Mean Ocean Potential Temperature', 'degC',& + Time, diag, 'Global Mean Ocean Potential Temperature', 'degC', & standard_name='sea_water_potential_temperature') CS%id_soga = register_scalar_field('ocean_model', 'soga', & Time, diag, 'Global Mean Ocean Salinity', 'psu', & @@ -1886,28 +1886,28 @@ subroutine register_surface_diags(Time, G, US, IDs, diag, tv) if (associated(tv%T)) then IDs%id_sst = register_diag_field('ocean_model', 'SST', diag%axesT1, Time, & - 'Sea Surface Temperature', 'degC', cmor_field_name='tos', & - cmor_long_name='Sea Surface Temperature', & + 'Sea Surface Temperature', 'degC', conversion=US%C_to_degC, & + cmor_field_name='tos', cmor_long_name='Sea Surface Temperature', & cmor_standard_name='sea_surface_temperature') IDs%id_sst_sq = register_diag_field('ocean_model', 'SST_sq', diag%axesT1, Time, & - 'Sea Surface Temperature Squared', 'degC2', cmor_field_name='tossq', & - cmor_long_name='Square of Sea Surface Temperature ', & + 'Sea Surface Temperature Squared', 'degC2', conversion=US%C_to_degC**2, & + cmor_field_name='tossq', cmor_long_name='Square of Sea Surface Temperature ', & cmor_standard_name='square_of_sea_surface_temperature') IDs%id_sss = register_diag_field('ocean_model', 'SSS', diag%axesT1, Time, & - 'Sea Surface Salinity', 'psu', cmor_field_name='sos', & - cmor_long_name='Sea Surface Salinity', & + 'Sea Surface Salinity', 'psu', conversion=US%S_to_ppt, & + cmor_field_name='sos', cmor_long_name='Sea Surface Salinity', & cmor_standard_name='sea_surface_salinity') IDs%id_sss_sq = register_diag_field('ocean_model', 'SSS_sq', diag%axesT1, Time, & - 'Sea Surface Salinity Squared', 'psu', cmor_field_name='sossq', & - cmor_long_name='Square of Sea Surface Salinity ', & + 'Sea Surface Salinity Squared', 'psu2', conversion=US%S_to_ppt**2, & + cmor_field_name='sossq', cmor_long_name='Square of Sea Surface Salinity ', & cmor_standard_name='square_of_sea_surface_salinity') if (tv%T_is_conT) then IDs%id_sstcon = register_diag_field('ocean_model', 'conSST', diag%axesT1, Time, & - 'Sea Surface Conservative Temperature', 'Celsius') + 'Sea Surface Conservative Temperature', 'Celsius', conversion=US%C_to_degC) 
endif if (tv%S_is_absS) then IDs%id_sssabs = register_diag_field('ocean_model', 'absSSS', diag%axesT1, Time, & - 'Sea Surface Absolute Salinity', 'g kg-1') + 'Sea Surface Absolute Salinity', 'g kg-1', conversion=US%S_to_ppt) endif if (associated(tv%frazil)) then IDs%id_fraz = register_diag_field('ocean_model', 'frazil', diag%axesT1, Time, & diff --git a/src/diagnostics/MOM_sum_output.F90 b/src/diagnostics/MOM_sum_output.F90 index b590a1e816..4eb1e67e96 100644 --- a/src/diagnostics/MOM_sum_output.F90 +++ b/src/diagnostics/MOM_sum_output.F90 @@ -1008,7 +1008,7 @@ subroutine accumulate_net_input(fluxes, sfc_state, tv, dt, G, US, CS) enddo ; enddo elseif (associated(fluxes%evap)) then do j=js,je ; do i=is,ie - heat_in(i,j) = heat_in(i,j) + (US%Q_to_J_kg*tv%C_p * US%degC_to_C*sfc_state%SST(i,j)) * FW_in(i,j) + heat_in(i,j) = heat_in(i,j) + (US%Q_to_J_kg*tv%C_p * sfc_state%SST(i,j)) * FW_in(i,j) enddo ; enddo endif diff --git a/src/ice_shelf/MOM_ice_shelf.F90 b/src/ice_shelf/MOM_ice_shelf.F90 index aaa0830273..26c74d73ec 100644 --- a/src/ice_shelf/MOM_ice_shelf.F90 +++ b/src/ice_shelf/MOM_ice_shelf.F90 @@ -365,7 +365,7 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) ISS%salt_flux(:,:) = 0.0 ; ISS%tflux_ocn(:,:) = 0.0 ; ISS%tfreeze(:,:) = 0.0 ! define Sbdry to avoid Run-Time Check Failure, when melt is not computed. haline_driving(:,:) = 0.0 - Sbdry(:,:) = US%ppt_to_S*sfc_state%sss(:,:) + Sbdry(:,:) = sfc_state%sss(:,:) !update time CS%Time = Time @@ -378,9 +378,9 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) if (CS%debug) then call hchksum(fluxes_in%frac_shelf_h, "frac_shelf_h before apply melting", CS%Grid_in%HI, haloshift=0) - call hchksum(sfc_state_in%sst, "sst before apply melting", CS%Grid_in%HI, haloshift=0) - call hchksum(sfc_state_in%sss, "sss before apply melting", CS%Grid_in%HI, haloshift=0) - call uvchksum("[uv]_ml before apply melting",sfc_state_in%u, sfc_state_in%v, & + call hchksum(sfc_state_in%sst, "sst before apply melting", CS%Grid_in%HI, haloshift=0, scale=US%C_to_degC) + call hchksum(sfc_state_in%sss, "sss before apply melting", CS%Grid_in%HI, haloshift=0, scale=US%S_to_ppt) + call uvchksum("[uv]_ml before apply melting", sfc_state_in%u, sfc_state_in%v, & CS%Grid_in%HI, haloshift=0, scale=US%L_T_to_m_s) call hchksum(sfc_state_in%ocean_mass, "ocean_mass before apply melting", CS%Grid_in%HI, haloshift=0, & scale=US%RZ_to_kg_m2) @@ -429,9 +429,9 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) do i=is,ie ; p_int(i) = CS%g_Earth * ISS%mass_shelf(i,j) ; enddo ! Calculate insitu densities and expansion coefficients - call calculate_density(US%degC_to_C*sfc_state%sst(:,j), US%ppt_to_S*sfc_state%sss(:,j), p_int, Rhoml(:), & + call calculate_density(sfc_state%sst(:,j), sfc_state%sss(:,j), p_int, Rhoml(:), & CS%eqn_of_state, EOSdom) - call calculate_density_derivs(US%degC_to_C*sfc_state%sst(:,j), US%ppt_to_S*sfc_state%sss(:,j), p_int, & + call calculate_density_derivs(sfc_state%sst(:,j), sfc_state%sss(:,j), p_int, & dR0_dT, dR0_dS, CS%eqn_of_state, EOSdom) do i=is,ie @@ -466,9 +466,9 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) ! S_a is always < 0.0 with a realistic expression for the freezing point. S_a = CS%dTFr_dS * CS%Gamma_T_3EQ * CS%Cp - S_b = CS%Gamma_T_3EQ*CS%Cp*(CS%TFr_0_0 + CS%dTFr_dp*p_int(i) - US%degC_to_C*sfc_state%sst(i,j)) - & + S_b = CS%Gamma_T_3EQ*CS%Cp*(CS%TFr_0_0 + CS%dTFr_dp*p_int(i) - sfc_state%sst(i,j)) - & CS%Lat_fusion * CS%Gamma_S_3EQ ! 
S_b Can take either sign, but is usually negative. - S_c = CS%Lat_fusion * CS%Gamma_S_3EQ * US%ppt_to_S*sfc_state%sss(i,j) ! Always >= 0 + S_c = CS%Lat_fusion * CS%Gamma_S_3EQ * sfc_state%sss(i,j) ! Always >= 0 if (S_c == 0.0) then ! The solution for fresh water. Sbdry(i,j) = 0.0 @@ -486,14 +486,14 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) ! Safety check if (Sbdry(i,j) < 0.) then - write(mesg,*) 'sfc_state%sss(i,j) = ',sfc_state%sss(i,j), & + write(mesg,*) 'sfc_state%sss(i,j) = ',US%S_to_ppt*sfc_state%sss(i,j), & 'S_a, S_b, S_c', US%ppt_to_S*S_a, S_b, US%S_to_ppt*S_c call MOM_error(WARNING, mesg, .true.) call MOM_error(FATAL, "shelf_calc_flux: Negative salinity (Sbdry).") endif else ! Guess sss as the iteration starting point for the boundary salinity. - Sbdry(i,j) = US%ppt_to_S*sfc_state%sss(i,j) ; Sb_max_set = .false. + Sbdry(i,j) = sfc_state%sss(i,j) ; Sb_max_set = .false. Sb_min_set = .false. endif !find_salt_root @@ -503,8 +503,8 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) ! call calculate_TFreeze(Sbdry(i,j), p_int(i), ISS%tfreeze(i,j), CS%eqn_of_state, scale_from_EOS=.true.) call calculate_TFreeze(Sbdry(i:i,j), p_int(i:i), ISS%tfreeze(i:i,j), CS%eqn_of_state) - dT_ustar = (ISS%tfreeze(i,j) - US%degC_to_C*sfc_state%sst(i,j)) * ustar_h - dS_ustar = (Sbdry(i,j) - US%ppt_to_S*sfc_state%sss(i,j)) * ustar_h + dT_ustar = (ISS%tfreeze(i,j) - sfc_state%sst(i,j)) * ustar_h + dS_ustar = (Sbdry(i,j) - sfc_state%sss(i,j)) * ustar_h ! First, determine the buoyancy flux assuming no effects of stability ! on the turbulence. Following H & J '99, this limit also applies @@ -610,11 +610,10 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) else mass_exch = exch_vel_s(i,j) * CS%Rho_ocn - Sbdry_it = (US%ppt_to_S*sfc_state%sss(i,j) * mass_exch + CS%Salin_ice * ISS%water_flux(i,j)) / & + Sbdry_it = (sfc_state%sss(i,j) * mass_exch + CS%Salin_ice * ISS%water_flux(i,j)) / & (mass_exch + ISS%water_flux(i,j)) dS_it = Sbdry_it - Sbdry(i,j) - if (abs(dS_it) < 1.0e-4*(0.5*(US%ppt_to_S*sfc_state%sss(i,j) + Sbdry(i,j) + 1.0e-10*US%ppt_to_S))) exit - + if (abs(dS_it) < 1.0e-4*(0.5*(sfc_state%sss(i,j) + Sbdry(i,j) + 1.0e-10*US%ppt_to_S))) exit if (dS_it < 0.0) then ! Sbdry is now the upper bound. if (Sb_max_set) then @@ -649,10 +648,10 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) ! is about the same as the boundary layer salinity. ! The following two lines are equivalent: ! call calculate_TFreeze(Sbdry(i,j), p_int(i), ISS%tfreeze(i,j), CS%eqn_of_state, scale_from_EOS=.true.) - call calculate_TFreeze(US%ppt_to_S*sfc_state%SSS(i:i,j), p_int(i:i), ISS%tfreeze(i:i,j), CS%eqn_of_state) + call calculate_TFreeze(sfc_state%SSS(i:i,j), p_int(i:i), ISS%tfreeze(i:i,j), CS%eqn_of_state) exch_vel_t(i,j) = CS%gamma_t - ISS%tflux_ocn(i,j) = RhoCp * exch_vel_t(i,j) * (ISS%tfreeze(i,j) - US%degC_to_C*sfc_state%sst(i,j)) + ISS%tflux_ocn(i,j) = RhoCp * exch_vel_t(i,j) * (ISS%tfreeze(i,j) - sfc_state%sst(i,j)) ISS%tflux_shelf(i,j) = 0.0 ISS%water_flux(i,j) = -I_LF * ISS%tflux_ocn(i,j) Sbdry(i,j) = 0.0 @@ -663,7 +662,7 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) ISS%tflux_ocn(i,j) = 0.0 endif -! haline_driving(i,j) = US%ppt_to_S*sfc_state%sss(i,j) - Sbdry(i,j) +! haline_driving(i,j) = sfc_state%sss(i,j) - Sbdry(i,j) enddo ! i-loop enddo ! 
j-loop @@ -687,11 +686,11 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) !!!!!!!!!!!!!!!!!!!!!!!!!!!!Safety checks !!!!!!!!!!!!!!!!!!!!!!!!! !1)Check if haline_driving computed above is consistent with - ! haline_driving = US%ppt_to_S*sfc_state%sss - Sbdry + ! haline_driving = sfc_state%sss - Sbdry !if (fluxes%iceshelf_melt(i,j) /= 0.0) then - ! if (haline_driving(i,j) /= (US%ppt_to_S*sfc_state%sss(i,j) - Sbdry(i,j))) then + ! if (haline_driving(i,j) /= (sfc_state%sss(i,j) - Sbdry(i,j))) then ! write(mesg,*) 'at i,j=',i,j,' haline_driving, sss-Sbdry',US%S_to_ppt*haline_driving(i,j), & - ! US%S_to_ppt*(US%ppt_to_S*sfc_state%sss(i,j) - Sbdry(i,j)) + ! US%S_to_ppt*(sfc_state%sss(i,j) - Sbdry(i,j)) ! call MOM_error(FATAL, & ! "shelf_calc_flux: Inconsistency in melt and haline_driving"//trim(mesg)) ! endif @@ -777,7 +776,7 @@ subroutine shelf_calc_flux(sfc_state_in, fluxes_in, Time, time_step_in, CS) if (CS%id_shelf_sfc_mass_flux > 0) call post_data(CS%id_shelf_sfc_mass_flux, fluxes%shelf_sfc_mass_flux, CS%diag) if (CS%id_melt > 0) call post_data(CS%id_melt, fluxes%iceshelf_melt, CS%diag) - if (CS%id_thermal_driving > 0) call post_data(CS%id_thermal_driving,(US%degC_to_C*sfc_state%sst-ISS%tfreeze), CS%diag) + if (CS%id_thermal_driving > 0) call post_data(CS%id_thermal_driving, (sfc_state%sst-ISS%tfreeze), CS%diag) if (CS%id_Sbdry > 0) call post_data(CS%id_Sbdry, Sbdry, CS%diag) if (CS%id_haline_driving > 0) call post_data(CS%id_haline_driving, haline_driving, CS%diag) if (CS%id_mass_flux > 0) call post_data(CS%id_mass_flux, mass_flux, CS%diag) diff --git a/src/tracer/MOM_CFC_cap.F90 b/src/tracer/MOM_CFC_cap.F90 index 44f83a475a..fc7e78e150 100644 --- a/src/tracer/MOM_CFC_cap.F90 +++ b/src/tracer/MOM_CFC_cap.F90 @@ -497,15 +497,15 @@ subroutine CFC_cap_fluxes(fluxes, sfc_state, G, US, Rho0, Time, id_cfc11_atm, id do j=js,je ; do i=is,ie ! ta in hectoKelvin - ta = max(0.01, (sfc_state%SST(i,j) + 273.15) * 0.01) - sal = sfc_state%SSS(i,j) + ta = max(0.01, (US%C_to_degC*sfc_state%SST(i,j) + 273.15) * 0.01) + sal = US%S_to_ppt*sfc_state%SSS(i,j) ! Calculate solubilities call get_solubility(alpha_11, alpha_12, ta, sal , G%mask2dT(i,j)) ! Calculate Schmidt numbers using coefficients given by ! Wanninkhof (2014); doi:10.4319/lom.2014.12.351. - call comp_CFC_schmidt(sfc_state%SST(i,j), sc_11, sc_12) + call comp_CFC_schmidt(US%C_to_degC*sfc_state%SST(i,j), sc_11, sc_12) kw_wo_sc_no_term(i,j) = kw_coeff * ((1.0 - fluxes%ice_fraction(i,j))*fluxes%u10_sqr(i,j)) diff --git a/src/tracer/MOM_OCMIP2_CFC.F90 b/src/tracer/MOM_OCMIP2_CFC.F90 index 8139d6e8c1..a864ec907f 100644 --- a/src/tracer/MOM_OCMIP2_CFC.F90 +++ b/src/tracer/MOM_OCMIP2_CFC.F90 @@ -524,13 +524,14 @@ end function OCMIP2_CFC_stock !> This subroutine extracts the surface CFC concentrations and other fields that !! are shared with the atmosphere to calculate CFC fluxes. -subroutine OCMIP2_CFC_surface_state(sfc_state, h, G, GV, CS) +subroutine OCMIP2_CFC_surface_state(sfc_state, h, G, GV, US, CS) type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure. type(verticalGrid_type), intent(in) :: GV !< The ocean's vertical grid structure type(surface), intent(inout) :: sfc_state !< A structure containing fields that !! describe the surface state of the ocean. real, dimension(SZI_(G),SZJ_(G),SZK_(GV)), & intent(in) :: h !< Layer thickness [H ~> m or kg m-2]. 
+ type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(OCMIP2_CFC_CS), pointer :: CS !< The control structure returned by a previous !! call to register_OCMIP2_CFC. @@ -555,8 +556,8 @@ subroutine OCMIP2_CFC_surface_state(sfc_state, h, G, GV, CS) if (.not.associated(CS)) return do j=js,je ; do i=is,ie - ta = max(0.01, (sfc_state%SST(i,j) + 273.15) * 0.01) ! Why is this in hectoKelvin? - sal = sfc_state%SSS(i,j) ; SST = sfc_state%SST(i,j) + ta = max(0.01, (US%C_to_degC*sfc_state%SST(i,j) + 273.15) * 0.01) ! Why is this in hectoKelvin? + sal = US%S_to_ppt*sfc_state%SSS(i,j) ; SST = US%C_to_degC*sfc_state%SST(i,j) ! Calculate solubilities using Warner and Weiss (1985) DSR, vol 32. ! The final result is in mol/cm3/pptv (1 part per trillion 1e-12) ! Use Bullister and Wisegavger for CCl4. diff --git a/src/tracer/MOM_generic_tracer.F90 b/src/tracer/MOM_generic_tracer.F90 index 902b91fccc..6170aee602 100644 --- a/src/tracer/MOM_generic_tracer.F90 +++ b/src/tracer/MOM_generic_tracer.F90 @@ -860,15 +860,23 @@ subroutine MOM_generic_tracer_surface_state(sfc_state, h, G, GV, CS) dzt(:,:,:) = GV%H_to_m * h(:,:,:) - sosga = global_area_mean(sfc_state%SSS, G) - - call generic_tracer_coupler_set(sfc_state%tr_fields,& - ST=sfc_state%SST,& - SS=sfc_state%SSS,& - rho=rho0,& !nnz: required for MOM5 and previous versions. - ilb=G%isd, jlb=G%jsd,& - dzt=dzt,& !This is needed for the Mocsy method of carbonate system vars - tau=1,sosga=sosga,model_time=get_diag_time_end(CS%diag)) + sosga = global_area_mean(sfc_state%SSS, G, scale=G%US%S_to_ppt) + + if ((G%US%C_to_degC == 1.0) .and. (G%US%S_to_ppt == 1.0)) then + call generic_tracer_coupler_set(sfc_state%tr_fields, & + ST=sfc_state%SST, SS=sfc_state%SSS, & + rho=rho0, & !nnz: required for MOM5 and previous versions. + ilb=G%isd, jlb=G%jsd, & + dzt=dzt,& !This is needed for the Mocsy method of carbonate system vars + tau=1, sosga=sosga, model_time=get_diag_time_end(CS%diag)) + else + call generic_tracer_coupler_set(sfc_state%tr_fields, & + ST=G%US%C_to_degC*sfc_state%SST, SS=G%US%S_to_ppt*sfc_state%SSS, & + rho=rho0, & !nnz: required for MOM5 and previous versions. + ilb=G%isd, jlb=G%jsd, & + dzt=dzt,& !This is needed for the Mocsy method of carbonate system vars + tau=1, sosga=sosga, model_time=get_diag_time_end(CS%diag)) + endif !Output diagnostics via diag_manager for all tracers in this module ! if (.NOT. associated(CS%g_tracer_list)) call MOM_error(FATAL, trim(sub_name)//& diff --git a/src/tracer/MOM_tracer_flow_control.F90 b/src/tracer/MOM_tracer_flow_control.F90 index ee1a1c30d0..1345126d73 100644 --- a/src/tracer/MOM_tracer_flow_control.F90 +++ b/src/tracer/MOM_tracer_flow_control.F90 @@ -789,13 +789,14 @@ end subroutine store_stocks !> This subroutine calls all registered tracer packages to enable them to !! add to the surface state returned to the coupler. These routines are optional. -subroutine call_tracer_surface_state(sfc_state, h, G, GV, CS) +subroutine call_tracer_surface_state(sfc_state, h, G, GV, US, CS) type(surface), intent(inout) :: sfc_state !< A structure containing fields that !! describe the surface state of the ocean. type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure. type(verticalGrid_type), intent(in) :: GV !< The ocean's vertical grid structure. 
real, dimension(SZI_(G),SZJ_(G),SZK_(GV)), & intent(in) :: h !< Layer thicknesses [H ~> m or kg m-2] + type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(tracer_flow_control_CS), pointer :: CS !< The control structure returned by a !! previous call to call_tracer_register. @@ -818,7 +819,7 @@ subroutine call_tracer_surface_state(sfc_state, h, G, GV, CS) if (CS%use_advection_test_tracer) & call advection_test_tracer_surface_state(sfc_state, h, G, GV, CS%advection_test_tracer_CSp) if (CS%use_OCMIP2_CFC) & - call OCMIP2_CFC_surface_state(sfc_state, h, G, GV, CS%OCMIP2_CFC_CSp) + call OCMIP2_CFC_surface_state(sfc_state, h, G, GV, US, CS%OCMIP2_CFC_CSp) if (CS%use_CFC_cap) & call CFC_cap_surface_state(sfc_state, G, CS%CFC_cap_CSp) if (CS%use_MOM_generic_tracer) & diff --git a/src/user/BFB_surface_forcing.F90 b/src/user/BFB_surface_forcing.F90 index 87b4d77758..6f16bdd6f0 100644 --- a/src/user/BFB_surface_forcing.F90 +++ b/src/user/BFB_surface_forcing.F90 @@ -29,11 +29,11 @@ module BFB_surface_forcing real :: Rho0 !< The density used in the Boussinesq approximation [R ~> kg m-3]. real :: G_Earth !< The gravitational acceleration [L2 Z-1 T-2 ~> m s-2] real :: Flux_const !< The restoring rate at the surface [Z T-1 ~> m s-1]. - real :: SST_s !< SST at the southern edge of the linear forcing ramp [degC] - real :: SST_n !< SST at the northern edge of the linear forcing ramp [degC] + real :: SST_s !< SST at the southern edge of the linear forcing ramp [C ~> degC] + real :: SST_n !< SST at the northern edge of the linear forcing ramp [C ~> degC] real :: lfrslat !< Southern latitude where the linear forcing ramp begins [degLat] real :: lfrnlat !< Northern latitude where the linear forcing ramp ends [degLat] - real :: drho_dt !< Rate of change of density with temperature [R degC-1 ~> kg m-3 degC-1]. + real :: drho_dt !< Rate of change of density with temperature [R C-1 ~> kg m-3 degC-1]. !! Note that temperature is being used as a dummy variable here. !! All temperatures are converted into density. @@ -59,12 +59,12 @@ subroutine BFB_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) !! returned by a previous call to !! BFB_surface_forcing_init. ! Local variables - real :: Temp_restore ! The temperature that is being restored toward [degC]. - real :: Salin_restore ! The salinity that is being restored toward [ppt]. + real :: Temp_restore ! The temperature that is being restored toward [C ~> degC]. + real :: Salin_restore ! The salinity that is being restored toward [S ~> ppt]. real :: density_restore ! The potential density that is being restored ! toward [R ~> kg m-3]. real :: rhoXcp ! Reference density times heat capacity times unit scaling - ! factors [Q R degC-1 ~> J m-3 degC-1] + ! factors [Q R C-1 ~> J m-3 degC-1] real :: buoy_rest_const ! A constant relating density anomalies to the ! restoring buoyancy flux [L2 T-3 R-1 ~> m5 s-3 kg-1]. integer :: i, j, is, ie, js, je @@ -125,10 +125,10 @@ subroutine BFB_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) call MOM_error(FATAL, "User_buoyancy_surface_forcing: " // & "Temperature and salinity restoring used without modification." ) - rhoXcp = CS%Rho0 * US%degC_to_C*fluxes%C_p + rhoXcp = CS%Rho0 * fluxes%C_p do j=js,je ; do i=is,ie - ! Set Temp_restore and Salin_restore to the temperature (in degC) and - ! salinity (in ppt) that are being restored toward. + ! Set Temp_restore and Salin_restore to the temperature (in [C ~> degC]) and + ! salinity (in [S ~> ppt]) that are being restored toward. 
Temp_restore = 0.0 Salin_restore = 0.0 @@ -150,12 +150,12 @@ subroutine BFB_buoyancy_forcing(sfc_state, fluxes, day, dt, G, US, CS) ! Set density_restore to an expression for the surface potential ! density [R ~> kg m-3] that is being restored toward. if (G%geoLatT(i,j) < CS%lfrslat) then - Temp_restore = CS%SST_s + Temp_restore = CS%SST_s elseif (G%geoLatT(i,j) > CS%lfrnlat) then - Temp_restore = CS%SST_n + Temp_restore = CS%SST_n else - Temp_restore = (CS%SST_s - CS%SST_n)/(CS%lfrslat - CS%lfrnlat) * & - (G%geoLatT(i,j) - CS%lfrslat) + CS%SST_s + Temp_restore = (CS%SST_s - CS%SST_n)/(CS%lfrslat - CS%lfrnlat) * & + (G%geoLatT(i,j) - CS%lfrslat) + CS%SST_s endif density_restore = Temp_restore*CS%drho_dt + CS%Rho0 @@ -212,13 +212,13 @@ subroutine BFB_surface_forcing_init(Time, G, US, param_file, diag, CS) units="degrees", default=40.0) call get_param(param_file, mdl, "SST_S", CS%SST_s, & "SST at the southern edge of the linear forcing ramp.", & - units="C", default=20.0) + units="C", default=20.0, scale=US%degC_to_C) call get_param(param_file, mdl, "SST_N", CS%SST_n, & "SST at the northern edge of the linear forcing ramp.", & - units="C", default=10.0) + units="C", default=10.0, scale=US%degC_to_C) call get_param(param_file, mdl, "DRHO_DT", CS%drho_dt, & "The rate of change of density with temperature.", & - units="kg m-3 K-1", default=-0.2, scale=US%kg_m3_to_R) + units="kg m-3 K-1", default=-0.2, scale=US%kg_m3_to_R*US%C_to_degC) call get_param(param_file, mdl, "RESTOREBUOY", CS%restorebuoy, & "If true, the buoyancy fluxes drive the model back "//& diff --git a/src/user/MOM_controlled_forcing.F90 b/src/user/MOM_controlled_forcing.F90 index 7583485ad7..24d370e920 100644 --- a/src/user/MOM_controlled_forcing.F90 +++ b/src/user/MOM_controlled_forcing.F90 @@ -42,11 +42,11 @@ module MOM_controlled_forcing real :: Len2 !< The square of the length scale over which the anomalies !! are smoothed via a Laplacian filter [L2 ~> m2] real :: lam_heat !< A constant of proportionality between SST anomalies - !! and heat fluxes [Q R Z T-1 degC-1 ~> W m-2 degC-1] + !! and heat fluxes [Q R Z T-1 C-1 ~> W m-2 degC-1] real :: lam_prec !< A constant of proportionality between SSS anomalies !! (normalised by mean SSS) and precipitation [R Z T-1 ~> kg m-2 s-1] real :: lam_cyc_heat !< A constant of proportionality between cyclical SST - !! anomalies and corrective heat fluxes [Q R Z T-1 degC-1 ~> W m-2 degC-1] + !! anomalies and corrective heat fluxes [Q R Z T-1 C-1 ~> W m-2 degC-1] real :: lam_cyc_prec !< A constant of proportionality between cyclical SSS !! anomalies (normalised by mean SSS) and corrective !! precipitation [R Z T-1 ~> kg m-2 s-1] @@ -71,17 +71,17 @@ module MOM_controlled_forcing !! the actual averages, and not time integrals. !! The dimension is the periodic bins. real, pointer, dimension(:,:,:) :: & - avg_SST_anom => NULL(), & !< The time-averaged periodic sea surface temperature anomalies [degC], + avg_SST_anom => NULL(), & !< The time-averaged periodic sea surface temperature anomalies [C ~> degC], !! or (at some points in the code), the time-integrated periodic - !! temperature anomalies [T degC ~> s degC]. + !! temperature anomalies [T C ~> s degC]. !! The third dimension is the periodic bins. - avg_SSS_anom => NULL(), & !< The time-averaged periodic sea surface salinity anomalies [ppt], + avg_SSS_anom => NULL(), & !< The time-averaged periodic sea surface salinity anomalies [S ~> ppt], !! or (at some points in the code), the time-integrated periodic - !! 
salinity anomalies [T ppt ~> s ppt]. + !! salinity anomalies [T S ~> s ppt]. !! The third dimension is the periodic bins. - avg_SSS => NULL() !< The time-averaged periodic sea surface salinities [ppt], or (at + avg_SSS => NULL() !< The time-averaged periodic sea surface salinities [S ~> ppt], or (at !! some points in the code), the time-integrated periodic - !! salinities [T ppt ~> s ppt]. + !! salinities [T S ~> s ppt]. !! The third dimension is the periodic bins. type(diag_ctrl), pointer :: diag => NULL() !< A structure that is used to @@ -96,9 +96,9 @@ module MOM_controlled_forcing subroutine apply_ctrl_forcing(SST_anom, SSS_anom, SSS_mean, virt_heat, virt_precip, & day_start, dt, G, US, CS) type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure - real, dimension(SZI_(G),SZJ_(G)), intent(in) :: SST_anom !< The sea surface temperature anomalies [degC] - real, dimension(SZI_(G),SZJ_(G)), intent(in) :: SSS_anom !< The sea surface salinity anomlies [ppt] - real, dimension(SZI_(G),SZJ_(G)), intent(in) :: SSS_mean !< The mean sea surface salinity [ppt] + real, dimension(SZI_(G),SZJ_(G)), intent(in) :: SST_anom !< The sea surface temperature anomalies [C ~> degC] + real, dimension(SZI_(G),SZJ_(G)), intent(in) :: SSS_anom !< The sea surface salinity anomlies [S ~> ppt] + real, dimension(SZI_(G),SZJ_(G)), intent(in) :: SSS_mean !< The mean sea surface salinity [S ~> ppt] real, dimension(SZI_(G),SZJ_(G)), intent(inout) :: virt_heat !< Virtual (corrective) heat !! fluxes that are augmented in this !! subroutine [Q R Z T-1 ~> W m-2] @@ -483,6 +483,7 @@ subroutine register_ctrl_forcing_restarts(G, US, param_file, CS, restart_CS) allocate(CS%avg_time(CS%num_cycle), source=0.0) allocate(CS%avg_SST_anom(isd:ied,jsd:jed,CS%num_cycle), source=0.0) allocate(CS%avg_SSS_anom(isd:ied,jsd:jed,CS%num_cycle), source=0.0) + allocate(CS%avg_SSS(isd:ied,jsd:jed,CS%num_cycle), source=0.0) write (period_str, '(i8)') CS%num_cycle period_str = trim('p ')//trim(adjustl(period_str)) @@ -497,9 +498,14 @@ subroutine register_ctrl_forcing_restarts(G, US, param_file, CS, restart_CS) longname="Cyclical accumulated averaging time", & units="sec", conversion=US%T_to_s, z_grid='1', t_grid=period_str) call register_restart_field(CS%avg_SST_anom, "avg_SST_anom", .false., restart_CS, & - longname="Cyclical average SST Anomaly", units="degC", z_grid='1', t_grid=period_str) + longname="Cyclical average SST Anomaly", & + units="degC", conversion=US%C_to_degC, z_grid='1', t_grid=period_str) call register_restart_field(CS%avg_SSS_anom, "avg_SSS_anom", .false., restart_CS, & - longname="Cyclical average SSS Anomaly", units="g kg-1", z_grid='1', t_grid=period_str) + longname="Cyclical average SSS Anomaly", & + units="g kg-1", conversion=US%S_to_ppt, z_grid='1', t_grid=period_str) + call register_restart_field(CS%avg_SSS_anom, "avg_SSS", .false., restart_CS, & + longname="Cyclical average SSS", & + units="g kg-1", conversion=US%S_to_ppt, z_grid='1', t_grid=period_str) endif end subroutine register_ctrl_forcing_restarts @@ -572,7 +578,7 @@ subroutine controlled_forcing_init(Time, G, US, param_file, diag, CS) call get_param(param_file, mdl, "CTRL_FORCE_LAMDA_HEAT", CS%lam_heat, & "A constant of proportionality between SST anomalies "//& "and controlling heat fluxes", & - units="W m-2 K-1", default=0.0, scale=US%W_m2_to_QRZ_T) + units="W m-2 K-1", default=0.0, scale=US%W_m2_to_QRZ_T*US%C_to_degC) call get_param(param_file, mdl, "CTRL_FORCE_LAMDA_PREC", CS%lam_prec, & "A constant of proportionality between SSS anomalies "//& 
"(normalised by mean SSS) and controlling precipitation.", & @@ -580,7 +586,7 @@ subroutine controlled_forcing_init(Time, G, US, param_file, diag, CS) call get_param(param_file, mdl, "CTRL_FORCE_LAMDA_CYC_HEAT", CS%lam_cyc_heat, & "A constant of proportionality between SST anomalies "//& "and cyclical controlling heat fluxes", & - units="W m-2 K-1", default=0.0, scale=US%W_m2_to_QRZ_T) + units="W m-2 K-1", default=0.0, scale=US%W_m2_to_QRZ_T*US%C_to_degC) call get_param(param_file, mdl, "CTRL_FORCE_LAMDA_CYC_PREC", CS%lam_cyc_prec, & "A constant of proportionality between SSS anomalies "//& "(normalised by mean SSS) and cyclical controlling precipitation.", & diff --git a/src/user/dumbbell_surface_forcing.F90 b/src/user/dumbbell_surface_forcing.F90 index e97478b1a5..a672a4378b 100644 --- a/src/user/dumbbell_surface_forcing.F90 +++ b/src/user/dumbbell_surface_forcing.F90 @@ -36,7 +36,7 @@ module dumbbell_surface_forcing real, dimension(:,:), allocatable :: & forcing_mask !< A mask regulating where forcing occurs real, dimension(:,:), allocatable :: & - S_restore !< The surface salinity field toward which to restore [ppt]. + S_restore !< The surface salinity field toward which to restore [S ~> ppt]. type(diag_ctrl), pointer :: diag => NULL() !< A structure that is used to regulate the !! timing of diagnostic output. end type dumbbell_surface_forcing_CS @@ -178,8 +178,8 @@ subroutine dumbbell_surface_forcing_init(Time, G, US, param_file, diag, CS) type(dumbbell_surface_forcing_CS), & pointer :: CS !< A pointer to the control structure for this module ! Local variables - real :: S_surf ! Initial surface salinity [ppt] - real :: S_range ! Range of the initial vertical distribution of salinity [ppt] + real :: S_surf ! Initial surface salinity [S ~> ppt] + real :: S_range ! Range of the initial vertical distribution of salinity [S ~> ppt] real :: x ! Latitude normalized by the domain size [nondim] integer :: i, j logical :: dbrotate ! If true, rotate the domain. @@ -218,10 +218,11 @@ subroutine dumbbell_surface_forcing_init(Time, G, US, param_file, diag, CS) 'Logical for rotation of dumbbell domain.',& units='nondim', default=.false., do_not_log=.true.) call get_param(param_file, mdl,"INITIAL_SSS", S_surf, & - "Initial surface salinity", units="1e-3", default=34.0, do_not_log=.true.) + "Initial surface salinity", & + units="1e-3", default=34.0, scale=US%ppt_to_S, do_not_log=.true.) call get_param(param_file, mdl,"INITIAL_S_RANGE", S_range, & - "Initial salinity range (bottom - surface)", units="1e-3", & - default=2., do_not_log=.true.) + "Initial salinity range (bottom - surface)", & + units="1e-3", default=2., scale=US%ppt_to_S, do_not_log=.true.) call get_param(param_file, mdl, "RESTOREBUOY", CS%restorebuoy, & "If true, the buoyancy fluxes drive the model back "//& From d0db65aeb43ef5da379a8c0c3e338f35cd8353f3 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Wed, 20 Jul 2022 10:45:17 -0400 Subject: [PATCH 14/40] Use cons_temp_to_pot_temp to call gsw_pt_from_ct Use cons_temp_to_pot_temp and abs_saln_to_prac_saln to do the conversions for several diagnostics, working with rescaled variables on array segments, rather than calling gsw_pt_from_ct and gsw_sp_from_sr once from each point. All answers are bitwise identical. 
--- src/diagnostics/MOM_diagnostics.F90 | 31 +++++++++++++++++------------ 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/src/diagnostics/MOM_diagnostics.F90 b/src/diagnostics/MOM_diagnostics.F90 index d6df58a39b..52546dd366 100644 --- a/src/diagnostics/MOM_diagnostics.F90 +++ b/src/diagnostics/MOM_diagnostics.F90 @@ -20,7 +20,7 @@ module MOM_diagnostics use MOM_domains, only : create_group_pass, do_group_pass, group_pass_type use MOM_domains, only : To_North, To_East use MOM_EOS, only : calculate_density, calculate_density_derivs, EOS_domain -use MOM_EOS, only : gsw_sp_from_sr, gsw_pt_from_ct +use MOM_EOS, only : cons_temp_to_pot_temp, abs_saln_to_prac_saln use MOM_error_handler, only : MOM_error, FATAL, WARNING use MOM_file_parser, only : get_param, log_version, param_file_type use MOM_grid, only : ocean_grid_type @@ -401,9 +401,10 @@ subroutine calculate_diagnostic_fields(u, v, h, uh, vh, tv, ADp, CDp, p_surf, & ! so they need to converted to potential temperature and practical salinity ! for some diagnostics using TEOS-10 function calls. if ((CS%id_Tpot > 0) .or. (CS%id_tob > 0) .or. (CS%id_tosq > 0)) then - do k=1,nz ; do j=js,je ; do i=is,ie - work_3d(i,j,k) = US%degC_to_C*gsw_pt_from_ct(US%S_to_ppt*tv%S(i,j,k),US%C_to_degC*tv%T(i,j,k)) - enddo ; enddo ; enddo + EOSdom(:) = EOS_domain(G%HI) + do k=1,nz ; do j=js,je + call cons_temp_to_pot_temp(tv%T(:,j,k), tv%S(:,j,k), work_3d(:,j,k), tv%eqn_of_state, EOSdom) + enddo ; enddo if (CS%id_Tpot > 0) call post_data(CS%id_Tpot, work_3d, CS%diag) if (CS%id_tob > 0) call post_data(CS%id_tob, work_3d(:,:,nz), CS%diag, mask=G%mask2dT) if (CS%id_tosq > 0) then @@ -430,9 +431,10 @@ subroutine calculate_diagnostic_fields(u, v, h, uh, vh, tv, ADp, CDp, p_surf, & ! so they need to converted to potential temperature and practical salinity ! for some diagnostics using TEOS-10 function calls. if ((CS%id_Sprac > 0) .or. (CS%id_sob > 0) .or. (CS%id_sosq >0)) then - do k=1,nz ; do j=js,je ; do i=is,ie - work_3d(i,j,k) = US%ppt_to_S*gsw_sp_from_sr(US%S_to_ppt*tv%S(i,j,k)) - enddo ; enddo ; enddo + EOSdom(:) = EOS_domain(G%HI) + do k=1,nz ; do j=js,je + call abs_saln_to_prac_saln(tv%S(:,j,k), work_3d(:,j,k), tv%eqn_of_state, EOSdom) + enddo ; enddo if (CS%id_Sprac > 0) call post_data(CS%id_Sprac, work_3d, CS%diag) if (CS%id_sob > 0) call post_data(CS%id_sob, work_3d(:,:,nz), CS%diag, mask=G%mask2dT) if (CS%id_sosq > 0) then @@ -1314,6 +1316,7 @@ subroutine post_surface_thermo_diags(IDs, G, GV, US, diag, dt_int, sfc_state, tv real :: zos_area_mean ! Global area mean sea surface height [Z ~> m] real :: volo ! Total volume of the ocean [m3] real :: ssh_ga ! Global ocean area weighted mean sea seaface height [Z ~> m] + integer, dimension(2) :: EOSdom ! The i-computational domain for the equation of state integer :: i, j, is, ie, js, je is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec @@ -1389,9 +1392,10 @@ subroutine post_surface_thermo_diags(IDs, G, GV, US, diag, dt_int, sfc_state, tv if (IDs%id_sstcon > 0) call post_data(IDs%id_sstcon, sfc_state%SST, diag, mask=G%mask2dT) ! Use TEOS-10 function calls convert T&S diagnostics from conservative temp ! to potential temperature. 
- do j=js,je ; do i=is,ie - work_2d(i,j) = US%degC_to_C*gsw_pt_from_ct(US%S_to_ppt*sfc_state%SSS(i,j), US%C_to_degC*sfc_state%SST(i,j)) - enddo ; enddo + EOSdom(:) = EOS_domain(G%HI) + do j=js,je + call cons_temp_to_pot_temp(sfc_state%SST(:,j), sfc_state%SSS(:,j), work_2d(:,j), tv%eqn_of_state, EOSdom) + enddo if (IDs%id_sst > 0) call post_data(IDs%id_sst, work_2d, diag, mask=G%mask2dT) else ! Internal T&S variables are potential temperature & practical salinity @@ -1403,9 +1407,10 @@ subroutine post_surface_thermo_diags(IDs, G, GV, US, diag, dt_int, sfc_state, tv if (IDs%id_sssabs > 0) call post_data(IDs%id_sssabs, sfc_state%SSS, diag, mask=G%mask2dT) ! Use TEOS-10 function calls convert T&S diagnostics from absolute salinity ! to practical salinity. - do j=js,je ; do i=is,ie - work_2d(i,j) = US%ppt_to_S*gsw_sp_from_sr(US%S_to_ppt*sfc_state%SSS(i,j)) - enddo ; enddo + EOSdom(:) = EOS_domain(G%HI) + do j=js,je + call abs_saln_to_prac_saln(sfc_state%SSS(:,j), work_2d(:,j), tv%eqn_of_state, EOSdom) + enddo if (IDs%id_sss > 0) call post_data(IDs%id_sss, work_2d, diag, mask=G%mask2dT) else ! Internal T&S variables are potential temperature & practical salinity From 2d574b6967222651775fa78c50114bfc41d69cf8 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Wed, 20 Jul 2022 17:08:58 -0400 Subject: [PATCH 15/40] +Rescaled the Stokes drift velocity variables Dimensionally rescaled the Stokes drift velocity variables in the mech_forcing type from [m s-1] to [L T-1 ~> m s-1], and the surface wave wavenumber variable from [rad m-1] to [rad Z-1 ~> rad m-1], eliminating several scaling factors from the code in the process, and attaching a scaling factor to a hard-coded dimensional velocity. All answers in the MOM6-examples test suite are bitwise identical. --- .../nuopc_cap/mom_surface_forcing_nuopc.F90 | 6 +++--- src/core/MOM_forcing_type.F90 | 6 +++--- src/user/MOM_wave_interface.F90 | 20 +++++++++---------- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 index 7e08f83530..8691f564dd 100644 --- a/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 +++ b/config_src/drivers/nuopc_cap/mom_surface_forcing_nuopc.F90 @@ -903,11 +903,11 @@ subroutine convert_IOB_to_forces(IOB, forces, index_bounds, Time, G, US, CS) ! wave to ocean coupling if ( associated(IOB%ustkb) ) then - forces%stk_wavenumbers(:) = IOB%stk_wavenumbers + forces%stk_wavenumbers(:) = IOB%stk_wavenumbers * US%Z_to_m do istk = 1,IOB%num_stk_bands do j=js,je; do i=is,ie - forces%ustkb(i,j,istk) = IOB%ustkb(i-I0,j-J0,istk) - forces%vstkb(i,j,istk) = IOB%vstkb(i-I0,j-J0,istk) + forces%ustkb(i,j,istk) = IOB%ustkb(i-I0,j-J0,istk) * US%m_s_to_L_T + forces%vstkb(i,j,istk) = IOB%vstkb(i-I0,j-J0,istk) * US%m_s_to_L_T enddo; enddo call pass_var(forces%ustkb(:,:,istk), G%domain ) call pass_var(forces%vstkb(:,:,istk), G%domain ) diff --git a/src/core/MOM_forcing_type.F90 b/src/core/MOM_forcing_type.F90 index 812361d3e1..4365dd6296 100644 --- a/src/core/MOM_forcing_type.F90 +++ b/src/core/MOM_forcing_type.F90 @@ -265,12 +265,12 @@ module MOM_forcing_type !! ice needs to be accumulated, and the rigidity explicitly !! reset to zero at the driver level when appropriate. 
real, pointer, dimension(:) :: & - stk_wavenumbers => NULL() !< The central wave number of Stokes bands [rad m-1] + stk_wavenumbers => NULL() !< The central wave number of Stokes bands [rad Z-1 ~> rad m-1] real, pointer, dimension(:,:,:) :: & - ustkb => NULL(), & !< Stokes Drift spectrum, zonal [m s-1] + ustkb => NULL(), & !< Stokes Drift spectrum, zonal [L T-1 ~> m s-1] !! Horizontal - u points !! 3rd dimension - wavenumber - vstkb => NULL() !< Stokes Drift spectrum, meridional [m s-1] + vstkb => NULL() !< Stokes Drift spectrum, meridional [L T-1 ~> m s-1] !! Horizontal - v points !! 3rd dimension - wavenumber diff --git a/src/user/MOM_wave_interface.F90 b/src/user/MOM_wave_interface.F90 index cd45f33bfd..a423ddc8b8 100644 --- a/src/user/MOM_wave_interface.F90 +++ b/src/user/MOM_wave_interface.F90 @@ -571,16 +571,16 @@ subroutine Update_Surface_Waves(G, GV, US, Time_present, dt, CS, forces) endif do b=1,CS%NumBands - CS%WaveNum_Cen(b) = US%Z_to_m * forces%stk_wavenumbers(b) + CS%WaveNum_Cen(b) = forces%stk_wavenumbers(b) !Interpolate from a grid to c grid do jj=G%jsc,G%jec do II=G%iscB,G%iecB - CS%STKx0(II,jj,b) = US%m_s_to_L_T*0.5*(forces%UStkb(ii,jj,b)+forces%UStkb(ii+1,jj,b)) + CS%STKx0(II,jj,b) = 0.5*(forces%UStkb(ii,jj,b)+forces%UStkb(ii+1,jj,b)) enddo enddo do JJ=G%jscB, G%jecB do ii=G%isc,G%iec - CS%STKY0(ii,JJ,b) = US%m_s_to_L_T*0.5*(forces%VStkb(ii,jj,b)+forces%VStkb(ii,jj+1,b)) + CS%STKY0(ii,JJ,b) = 0.5*(forces%VStkb(ii,jj,b)+forces%VStkb(ii,jj+1,b)) enddo enddo call pass_vector(CS%STKx0(:,:,b),CS%STKy0(:,:,b), G%Domain) @@ -915,8 +915,8 @@ subroutine Surface_Bands_by_data_override(Time, G, GV, US, CS) type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type ! Local variables - real :: temp_x(SZI_(G),SZJ_(G)) ! Pseudo-zonal Stokes drift of band at h-points [m s-1] - real :: temp_y(SZI_(G),SZJ_(G)) ! Psuedo-meridional Stokes drift of band at h-points [m s-1] + real :: temp_x(SZI_(G),SZJ_(G)) ! Pseudo-zonal Stokes drift of band at h-points [L T-1 ~> m s-1] + real :: temp_y(SZI_(G),SZJ_(G)) ! Psuedo-meridional Stokes drift of band at h-points [L T-1 ~> m s-1] integer, dimension(4) :: sizes ! The sizes of the various dimensions of the variable. character(len=48) :: dim_name(4) ! The names of the dimensions of the variable. character(len=20) :: varname ! The name of an input variable for data override. @@ -985,16 +985,16 @@ subroutine Surface_Bands_by_data_override(Time, G, GV, US, CS) temp_y(:,:) = 0.0 varname = ' ' write(varname, "(A3,I0)") 'Usx', b - call data_override('OCN', trim(varname), temp_x, Time) + call data_override(G%Domain, trim(varname), temp_x, Time, scale=US%m_s_to_L_T) varname = ' ' write(varname, "(A3,I0)") 'Usy', b - call data_override('OCN', trim(varname), temp_y, Time) + call data_override(G%Domain, trim(varname), temp_y, Time, scale=US%m_s_to_L_T) ! Update halo on h-grid call pass_vector(temp_x, temp_y, G%Domain, To_All, AGRID) ! Filter land values do j = G%jsd,G%jed do i = G%Isd,G%Ied - if (abs(temp_x(i,j)) > 10. .or. abs(temp_y(i,j)) > 10.) then + if ((abs(temp_x(i,j)) > 10.0*US%m_s_to_L_T) .or. (abs(temp_y(i,j)) > 10.0*US%m_s_to_L_T)) then ! Assume land-mask and zero out temp_x(i,j) = 0.0 temp_y(i,j) = 0.0 @@ -1005,12 +1005,12 @@ subroutine Surface_Bands_by_data_override(Time, G, GV, US, CS) ! 
Interpolate to u/v grids do j = G%jsc,G%jec do I = G%IscB,G%IecB - CS%STKx0(I,j,b) = 0.5 * US%m_s_to_L_T*(temp_x(i,j) + temp_x(i+1,j)) + CS%STKx0(I,j,b) = 0.5 * (temp_x(i,j) + temp_x(i+1,j)) enddo enddo do J = G%JscB,G%JecB do i = G%isc,G%iec - CS%STKy0(i,J,b) = 0.5 * US%m_s_to_L_T*(temp_y(i,j) + temp_y(i,j+1)) + CS%STKy0(i,J,b) = 0.5 * (temp_y(i,j) + temp_y(i,j+1)) enddo enddo enddo !Closes b-loop From f0c12bddae70d6c83eb9e06ca1f48d246996a7f9 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Sun, 31 Jul 2022 13:35:27 -0400 Subject: [PATCH 16/40] (*)Set defaults in USE_REGRIDDING get_param calls Set default values in all get_param calls for USE_REGRIDDING. Previously, there had been 4 calls where this was missing, which led to the problems noted at https://github.com/mom-ocean/MOM6/issues/1576. This PR will allow that issue to be closed. Also used the default argument in a get_param call for INPUTDIR, although that case would not change any behavior because the value was set before the get_param call. A fail_in_missing argument was added to the FMS_cap call to get_param for GUST_2D_FILE, mirroring what is done for the solo_driver code, but cases where this was actually missing were very likely to have failed later anyway, but without an explicit error message. This PR could change unpredictable behavior in cases where USE_REGRIDDING is not explicitly set, but all answers are bitwise identical in the MOM6-examples test suite. --- .../FMS_cap/MOM_surface_forcing_gfdl.F90 | 10 +++++----- .../MOM_state_initialization.F90 | 18 ++++++++--------- src/user/dumbbell_initialization.F90 | 20 +++++++++---------- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 index aa5c10c958..edd2517adc 100644 --- a/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 +++ b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 @@ -1388,7 +1388,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) call get_param(param_file, mdl, "FLUXCONST_SALT", CS%Flux_const_salt, & "The constant that relates the restoring surface salt fluxes to the relative "//& "surface anomalies (akin to a piston velocity). Note the non-MKS units.", & - fail_if_missing=.false.,default=unscaled_fluxconst, units="m day-1", scale=US%m_to_Z*US%T_to_s) + fail_if_missing=.false., default=unscaled_fluxconst, units="m day-1", scale=US%m_to_Z*US%T_to_s) ! Finish converting CS%Flux_const from m day-1 to [Z T-1 ~> m s-1]. CS%Flux_const = CS%Flux_const / 86400.0 CS%Flux_const_salt = CS%Flux_const_salt / 86400.0 @@ -1435,11 +1435,11 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) call get_param(param_file, mdl, "FLUXCONST", CS%Flux_const, & "The constant that relates the restoring surface fluxes to the relative "//& "surface anomalies (akin to a piston velocity). Note the non-MKS units.", & - default=0.0, units="m day-1", scale=US%m_to_Z*US%T_to_s,unscaled=unscaled_fluxconst) + default=0.0, units="m day-1", scale=US%m_to_Z*US%T_to_s, unscaled=unscaled_fluxconst) call get_param(param_file, mdl, "FLUXCONST_TEMP", CS%Flux_const_temp, & "The constant that relates the restoring surface temperature fluxes to the relative "//& "surface anomalies (akin to a piston velocity). 
Note the non-MKS units.", & - fail_if_missing=.false.,default=unscaled_fluxconst, units="m day-1", scale=US%m_to_Z*US%T_to_s) + fail_if_missing=.false., default=unscaled_fluxconst, units="m day-1", scale=US%m_to_Z*US%T_to_s) ! Convert CS%Flux_const from m day-1 to m s-1. CS%Flux_const = CS%Flux_const / 86400.0 CS%Flux_const_temp = CS%Flux_const_temp / 86400.0 @@ -1524,7 +1524,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) if (CS%read_gust_2d) then call get_param(param_file, mdl, "GUST_2D_FILE", gust_file, & "The file in which the wind gustiness is found in "//& - "variable gustiness.") + "variable gustiness.", fail_if_missing=.true.) call safe_alloc_ptr(CS%gust,isd,ied,jsd,jed) gust_file = trim(CS%inputdir) // trim(gust_file) @@ -1550,7 +1550,7 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) if (CS%rigid_sea_ice) then call get_param(param_file, mdl, "G_EARTH", CS%g_Earth, & "The gravitational acceleration of the Earth.", & - units="m s-2", default = 9.80, scale=US%Z_to_m*US%m_s_to_L_T**2) + units="m s-2", default=9.80, scale=US%Z_to_m*US%m_s_to_L_T**2) call get_param(param_file, mdl, "SEA_ICE_MEAN_DENSITY", CS%density_sea_ice, & "A typical density of sea ice, used with the kinematic "//& "viscosity, when USE_RIGID_SEA_ICE is true.", & diff --git a/src/initialization/MOM_state_initialization.F90 b/src/initialization/MOM_state_initialization.F90 index 257d25dad0..d21c13a3e5 100644 --- a/src/initialization/MOM_state_initialization.F90 +++ b/src/initialization/MOM_state_initialization.F90 @@ -1087,11 +1087,11 @@ subroutine depress_surface(h, G, GV, US, param_file, tv, just_read, z_top_shelf) call get_param(param_file, mdl, "INPUTDIR", inputdir, default=".") inputdir = slasher(inputdir) - call get_param(param_file, mdl, "SURFACE_HEIGHT_IC_FILE", eta_srf_file,& + call get_param(param_file, mdl, "SURFACE_HEIGHT_IC_FILE", eta_srf_file, & "The initial condition file for the surface height.", & fail_if_missing=.not.just_read, do_not_log=just_read) call get_param(param_file, mdl, "SURFACE_HEIGHT_IC_VAR", eta_srf_var, & - "The initial condition variable for the surface height.",& + "The initial condition variable for the surface height.", & default="SSH", do_not_log=just_read) filename = trim(inputdir)//trim(eta_srf_file) if (.not.just_read) & @@ -1263,7 +1263,7 @@ subroutine calc_sfc_displacement(PF, G, GV, US, mass_shelf, tv, h) call get_param(PF, mdl, "ICE_SHELF_INITIALIZATION_Z_TOLERANCE", tol, & "A initialization tolerance for the calculation of the static "// & - "ice shelf displacement (m) using initial temperature and salinity profile.",& + "ice shelf displacement (m) using initial temperature and salinity profile.", & default=0.001, units="m", scale=US%m_to_Z) max_iter = 1e3 call MOM_mesg("Started calculating initial interface position under ice shelf ") @@ -1949,13 +1949,13 @@ subroutine initialize_sponges_file(G, GV, US, use_temperature, tv, u, v, depth_t "The name of the inverse damping rate variable in "//& "SPONGE_UV_DAMPING_FILE for the velocities.", default=Idamp_var) endif - call get_param(param_file, mdl, "USE_REGRIDDING", use_ALE, do_not_log=.true.) + call get_param(param_file, mdl, "USE_REGRIDDING", use_ALE, default=.false., do_not_log=.true.) !### NEW_SPONGES should be obsoleted properly, rather than merely deprecated, at which ! point only the else branch of the new_sponge_param block would be retained. 
call get_param(param_file, mdl, "NEW_SPONGES", new_sponge_param, & "Set True if using the newer sponging code which "//& - "performs on-the-fly regridding in lat-lon-time.",& + "performs on-the-fly regridding in lat-lon-time"//& "of sponge restoring data.", default=.false., do_not_log=.true.) if (new_sponge_param) then call get_param(param_file, mdl, "INTERPOLATE_SPONGE_TIME_SPACE", time_space_interp_sponge, & @@ -2230,7 +2230,7 @@ subroutine initialize_oda_incupd_file(G, GV, US, use_temperature, tv, h, u, v, p default=.false.) endif call get_param(param_file, mdl, "ODA_INCUPD_RESET_NCOUNT", reset_ncount, & - "If True, reinitialize number of updates already done, ncount.",& + "If True, reinitialize number of updates already done, ncount.", & default=.true.) if (.not.oda_inc .and. .not.reset_ncount) & call MOM_error(FATAL, " initialize_oda_incupd: restarting during update "// & @@ -2258,7 +2258,7 @@ subroutine initialize_oda_incupd_file(G, GV, US, use_temperature, tv, h, u, v, p "The name of the meridional vel. inc. variable in "//& "ODA_INCUPD_FILE.", default="v_inc") -! call get_param(param_file, mdl, "USE_REGRIDDING", use_ALE, do_not_log=.true.) +! call get_param(param_file, mdl, "USE_REGRIDDING", use_ALE, default=.false., do_not_log=.true.) ! Read in incremental update for tracers filename = trim(inputdir)//trim(inc_file) @@ -2486,7 +2486,7 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just if (.not.just_read) call callTree_enter(trim(mdl)//"(), MOM_state_initialization.F90") if (.not.just_read) call log_version(PF, mdl, version, "") - inputdir = "." ; call get_param(PF, mdl, "INPUTDIR", inputdir) + call get_param(PF, mdl, "INPUTDIR", inputdir, default=".") inputdir = slasher(inputdir) eos => tv%eqn_of_state @@ -2525,7 +2525,7 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just "is True.", default="PPM_IH4", do_not_log=just_read) call get_param(PF, mdl, "Z_INIT_REMAP_GENERAL", remap_general, & "If false, only initializes to z* coordinates. "//& - "If true, allows initialization directly to general coordinates.",& + "If true, allows initialization directly to general coordinates.", & default=.false., do_not_log=just_read) call get_param(PF, mdl, "Z_INIT_REMAP_FULL_COLUMN", remap_full_column, & "If false, only reconstructs profiles for valid data points. "//& diff --git a/src/user/dumbbell_initialization.F90 b/src/user/dumbbell_initialization.F90 index 570e638465..e4ce7e77f5 100644 --- a/src/user/dumbbell_initialization.F90 +++ b/src/user/dumbbell_initialization.F90 @@ -51,13 +51,13 @@ subroutine dumbbell_initialize_topography( D, G, param_file, max_depth ) logical :: dbrotate call get_param(param_file, mdl, "DUMBBELL_LEN",dblen, & - 'Lateral Length scale for dumbbell.',& + 'Lateral Length scale for dumbbell.', & units='km', default=600., do_not_log=.false.) call get_param(param_file, mdl, "DUMBBELL_FRACTION",dbfrac, & - 'Meridional fraction for narrow part of dumbbell.',& + 'Meridional fraction for narrow part of dumbbell.', & units='nondim', default=0.5, do_not_log=.false.) call get_param(param_file, mdl, "DUMBBELL_ROTATION", dbrotate, & - 'Logical for rotation of dumbbell domain.',& + 'Logical for rotation of dumbbell domain.', & units='nondim', default=.false., do_not_log=.false.) 
if (G%x_axis_units == 'm') then @@ -128,11 +128,11 @@ subroutine dumbbell_initialize_thickness ( h, depth_tot, G, GV, US, param_file, if (.not.just_read) call log_version(param_file, mdl, version, "") call get_param(param_file, mdl,"MIN_THICKNESS", min_thickness, & - 'Minimum thickness for layer',& + 'Minimum thickness for layer', & units='m', default=1.0e-3, scale=US%m_to_Z, do_not_log=just_read) call get_param(param_file, mdl,"REGRIDDING_COORDINATE_MODE", verticalCoordinate, & default=DEFAULT_COORDINATE_MODE, do_not_log=just_read) - call get_param(param_file, mdl, "USE_REGRIDDING", use_ALE, do_not_log = .true.) + call get_param(param_file, mdl, "USE_REGRIDDING", use_ALE, default=.false., do_not_log=.true.) if (.not. use_ALE) verticalCoordinate = "LAYER" ! WARNING: this routine specifies the interface heights so that the last layer @@ -149,7 +149,7 @@ subroutine dumbbell_initialize_thickness ( h, depth_tot, G, GV, US, param_file, select case ( coordinateMode(verticalCoordinate) ) case ( REGRIDDING_LAYER) ! Initial thicknesses for isopycnal coordinates call get_param(param_file, mdl, "DUMBBELL_ROTATION", dbrotate, & - 'Logical for rotation of dumbbell domain.',& + 'Logical for rotation of dumbbell domain.', & units='nondim', default=.false., do_not_log=just_read) do j=js,je do i=is,ie @@ -273,7 +273,7 @@ subroutine dumbbell_initialize_temperature_salinity ( T, S, h, G, GV, US, param_ T_surf = 20.0*US%degC_to_C ! layer mode - call get_param(param_file, mdl, "USE_REGRIDDING", use_ALE, do_not_log = .true.) + call get_param(param_file, mdl, "USE_REGRIDDING", use_ALE, default=.false., do_not_log=.true.) if (.not. use_ALE) call MOM_error(FATAL, "dumbbell_initialize_temperature_salinity: "//& "Please use 'fit' for 'TS_CONFIG' in the LAYER mode.") @@ -357,10 +357,10 @@ subroutine dumbbell_initialize_sponges(G, GV, US, tv, h_in, depth_tot, param_fil logical :: dbrotate ! If true, rotate the domain. call get_param(param_file, mdl,"DUMBBELL_LEN",dblen, & - 'Lateral Length scale for dumbbell ',& + 'Lateral Length scale for dumbbell ', & units='km', default=600., do_not_log=.true.) call get_param(param_file, mdl, "DUMBBELL_ROTATION", dbrotate, & - 'Logical for rotation of dumbbell domain.',& + 'Logical for rotation of dumbbell domain.', & units='nondim', default=.false., do_not_log=.true.) if (G%x_axis_units == 'm') then @@ -379,7 +379,7 @@ subroutine dumbbell_initialize_sponges(G, GV, US, tv, h_in, depth_tot, param_fil 'DUMBBELL salinity range (right-left)', & units='1e-3', default=2., scale=US%ppt_to_S, do_not_log=.true.) call get_param(param_file, mdl,"MIN_THICKNESS", min_thickness, & - 'Minimum thickness for layer',& + 'Minimum thickness for layer', & units='m', default=1.0e-3, scale=US%m_to_Z, do_not_log=.true.) ! no active sponges From 6c7812a4615fc7a6dcf5cd8ec0af740c43f55b6c Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Tue, 2 Aug 2022 07:04:03 -0400 Subject: [PATCH 17/40] Pass fail_if_missing to 4 user get_param calls Added fail_if_missing or default arguments to 5 get_param calls in user code, with values set consistently with other calls for the same parameters. Also replaced 4 get_param calls with copies of equivalent fields from the grid type in one use initialization routine. All answers are bitwise identical. 
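For illustration, the grid-type substitution mentioned above amounts to replacing parameter-file reads of the domain extents with copies of fields the grid type already carries, using the names that appear in the BFB_initialization.F90 hunk below:

    ! Formerly read back from the parameter file:
    call get_param(param_file, mdl, "SOUTHLAT", slat, &
                   "The southern latitude of the domain.", units="degrees")
    ! Now copied directly from the grid type:
    slat = G%south_lat ; lenlat = G%len_lat
    wlon = G%west_lon ; lenlon = G%len_lon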
--- src/user/BFB_initialization.F90 | 18 +++++++++--------- src/user/DOME2d_initialization.F90 | 2 +- src/user/Kelvin_initialization.F90 | 2 +- src/user/dense_water_initialization.F90 | 6 ++++-- src/user/shelfwave_initialization.F90 | 2 +- 5 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/user/BFB_initialization.F90 b/src/user/BFB_initialization.F90 index 22d3156723..68a6b6530b 100644 --- a/src/user/BFB_initialization.F90 +++ b/src/user/BFB_initialization.F90 @@ -93,7 +93,11 @@ subroutine BFB_initialize_sponges_southonly(G, GV, US, use_temperature, tv, dept real :: Idamp(SZI_(G),SZJ_(G)) ! The sponge damping rate [T-1 ~> s-1] real :: H0(SZK_(GV)) ! Resting layer thicknesses in depth units [Z ~> m]. real :: min_depth ! The minimum ocean depth in depth units [Z ~> m]. - real :: slat, wlon, lenlat, lenlon, nlat + real :: slat ! The southern latitude of the domain [degrees_N] + real :: wlon ! The western longitude of the domain [degrees_E] + real :: lenlat ! The latitudinal length of the domain [degrees_N] + real :: lenlon ! The longitudinal length of the domain [degrees_E] + real :: nlat ! The northern latitude of the domain [degrees_N] real :: max_damping ! The maximum damping rate [T-1 ~> s-1] character(len=40) :: mdl = "BFB_initialize_sponges_southonly" ! This subroutine's name. integer :: i, j, k, is, ie, js, je, isd, ied, jsd, jed, nz @@ -112,14 +116,10 @@ subroutine BFB_initialize_sponges_southonly(G, GV, US, use_temperature, tv, dept call get_param(param_file, mdl, "MINIMUM_DEPTH", min_depth, & "The minimum depth of the ocean.", units="m", default=0.0, scale=US%m_to_Z) - call get_param(param_file, mdl, "SOUTHLAT", slat, & - "The southern latitude of the domain.", units="degrees") - call get_param(param_file, mdl, "LENLAT", lenlat, & - "The latitudinal length of the domain.", units="degrees") - call get_param(param_file, mdl, "WESTLON", wlon, & - "The western longitude of the domain.", units="degrees", default=0.0) - call get_param(param_file, mdl, "LENLON", lenlon, & - "The longitudinal length of the domain.", units="degrees") + slat = G%south_lat + lenlat = G%len_lat + wlon = G%west_lon + lenlon = G%len_lon nlat = slat + lenlat do k=1,nz ; H0(k) = -G%max_depth * real(k-1) / real(nz) ; enddo diff --git a/src/user/DOME2d_initialization.F90 b/src/user/DOME2d_initialization.F90 index 393347d1f2..d0ed88c128 100644 --- a/src/user/DOME2d_initialization.F90 +++ b/src/user/DOME2d_initialization.F90 @@ -411,7 +411,7 @@ subroutine DOME2d_initialize_sponges(G, GV, US, tv, depth_tot, param_file, use_A call get_param(param_file, mdl, "DOME2D_SHELF_DEPTH", dome2d_depth_bay, & default=0.2, do_not_log=.true.) call get_param(param_file, mdl, "S_REF", S_ref, default=35.0, scale=US%ppt_to_S) - call get_param(param_file, mdl, "T_REF", T_ref, scale=US%degC_to_C) + call get_param(param_file, mdl, "T_REF", T_ref, scale=US%degC_to_C, fail_if_missing=.false.) call get_param(param_file, mdl, "S_RANGE", S_range, default=2.0, scale=US%ppt_to_S) call get_param(param_file, mdl, "T_RANGE", T_range, default=0.0, scale=US%degC_to_C) diff --git a/src/user/Kelvin_initialization.F90 b/src/user/Kelvin_initialization.F90 index a65dc45e73..595736540e 100644 --- a/src/user/Kelvin_initialization.F90 +++ b/src/user/Kelvin_initialization.F90 @@ -75,7 +75,7 @@ function register_Kelvin_OBC(param_file, CS, US, OBC_Reg) default=0) call get_param(param_file, mdl, "F_0", CS%F_0, & default=0.0, units="s-1", scale=US%T_to_s, do_not_log=.true.) - call get_param(param_file, mdl, "TOPO_CONFIG", config, do_not_log=.true.) 
+ call get_param(param_file, mdl, "TOPO_CONFIG", config, fail_if_missing=.true., do_not_log=.true.) if (trim(config) == "Kelvin") then call get_param(param_file, mdl, "ROTATED_COAST_OFFSET_1", CS%coast_offset1, & "The distance along the southern and northern boundaries "//& diff --git a/src/user/dense_water_initialization.F90 b/src/user/dense_water_initialization.F90 index 1c372bf1b7..fa44a78604 100644 --- a/src/user/dense_water_initialization.F90 +++ b/src/user/dense_water_initialization.F90 @@ -197,8 +197,10 @@ subroutine dense_water_initialize_sponges(G, GV, US, tv, depth_tot, param_file, call get_param(param_file, mdl, "DENSE_WATER_SILL_HEIGHT", sill_height, default=default_sill, do_not_log=.true.) call get_param(param_file, mdl, "S_REF", S_ref, default=35.0, scale=US%ppt_to_S, do_not_log=.true.) - call get_param(param_file, mdl, "S_RANGE", S_range, scale=US%ppt_to_S, do_not_log=.true.) - call get_param(param_file, mdl, "T_REF", T_ref, scale=US%degC_to_C, do_not_log=.true.) + call get_param(param_file, mdl, "S_RANGE", S_range, & + units='1e-3', default=2.0, scale=US%ppt_to_S, do_not_log=.true.) + call get_param(param_file, mdl, "T_REF", T_ref, & + units='degC', scale=US%degC_to_C, fail_if_missing=.true., do_not_log=.true.) ! no active sponges if (west_sponge_time_scale <= 0. .and. east_sponge_time_scale <= 0.) return diff --git a/src/user/shelfwave_initialization.F90 b/src/user/shelfwave_initialization.F90 index 3bb031bbb6..a9c1914356 100644 --- a/src/user/shelfwave_initialization.F90 +++ b/src/user/shelfwave_initialization.F90 @@ -66,7 +66,7 @@ function register_shelfwave_OBC(param_file, CS, US, OBC_Reg) call get_param(param_file, mdl, "F_0", CS%f0, & default=0.0, units="s-1", scale=US%T_to_s, do_not_log=.true.) call get_param(param_file, mdl, "LENLAT", len_lat, & - do_not_log=.true.) + do_not_log=.true., fail_if_missing=.true.) call get_param(param_file, mdl,"SHELFWAVE_X_WAVELENGTH",CS%Lx, & "Length scale of shelfwave in x-direction.",& units="Same as x,y", default=100.) From 2192db98134dfafba2890188856a552e9df4c763 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Mon, 1 Aug 2022 14:01:30 -0400 Subject: [PATCH 18/40] +Non-Boussinesq thickness diagnostics in kg m-2 Modified the units for 10 thickness or thickness tendency diagnostics to write them in their native units (e.g., kg m-2) when run in non-Boussinesq mode. These changes were proposed in https://github.com/mom-ocean/MOM6/issues/1565 and discussed and agreed to at the MOM6 dev call on 4/25/22. Previously these diagnostics had been converted to approximate thicknesses in m using a constant reference density, but this was only accurate to about 0.1% and could be misinterpreted. A number of other (more commonly used) thickness-related variables were already being written in their native units, and this makes the model's output more self consistent. All solutions are bitwise identical, but some diagnostics will change when run in non-Boussinesq mode. 
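Schematically, the affected registration calls now take both the units string
and the conversion factor from the vertical grid type, so a single code path
reports 'm' in Boussinesq mode and 'kg m-2' in non-Boussinesq mode. The
fragment below is drawn from the register_transport_diags changes in this
patch; the declarations of thickness_units, IDs, diag and Time are assumed
from that routine:

    thickness_units = get_thickness_units(GV)  ! "m" when Boussinesq, "kg m-2" otherwise
    IDs%id_dynamics_h = register_diag_field('ocean_model', 'dynamics_h', &
        diag%axesTl, Time, 'Layer thicknesses prior to horizontal dynamics', &
        thickness_units, conversion=GV%H_to_MKS, v_extensive=.true.)
    IDs%id_dynamics_h_tendency = register_diag_field('ocean_model', 'dynamics_h_tendency', &
        diag%axesTl, Time, 'Change in layer thicknesses due to horizontal dynamics', &
        trim(thickness_units)//" s-1", conversion=GV%H_to_MKS*US%s_to_T, v_extensive=.true.)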
--- src/ALE/MOM_ALE.F90 | 19 ++++++++------ src/diagnostics/MOM_diagnostics.F90 | 25 +++++++++---------- .../vertical/MOM_diabatic_driver.F90 | 13 +++++----- src/tracer/MOM_offline_main.F90 | 4 +-- 4 files changed, 33 insertions(+), 28 deletions(-) diff --git a/src/ALE/MOM_ALE.F90 b/src/ALE/MOM_ALE.F90 index 5240061c3f..293817e24f 100644 --- a/src/ALE/MOM_ALE.F90 +++ b/src/ALE/MOM_ALE.F90 @@ -306,7 +306,11 @@ subroutine ALE_register_diags(Time, G, GV, US, diag, CS) type(diag_ctrl), target, intent(in) :: diag !< Diagnostics control structure type(ALE_CS), pointer :: CS !< Module control structure + ! Local variables + character(len=48) :: thickness_units + CS%diag => diag + thickness_units = get_thickness_units(GV) ! These diagnostics of the state variables before ALE are useful for ! debugging the ALE code. @@ -315,7 +319,7 @@ subroutine ALE_register_diags(Time, G, GV, US, diag, CS) CS%id_v_preale = register_diag_field('ocean_model', 'v_preale', diag%axesCvL, Time, & 'Meridional velocity before remapping', 'm s-1', conversion=US%L_T_to_m_s) CS%id_h_preale = register_diag_field('ocean_model', 'h_preale', diag%axesTL, Time, & - 'Layer Thickness before remapping', get_thickness_units(GV), conversion=GV%H_to_MKS, & + 'Layer Thickness before remapping', thickness_units, conversion=GV%H_to_MKS, & v_extensive=.true.) CS%id_T_preale = register_diag_field('ocean_model', 'T_preale', diag%axesTL, Time, & 'Temperature before remapping', 'degC', conversion=US%C_to_degC) @@ -324,14 +328,15 @@ subroutine ALE_register_diags(Time, G, GV, US, diag, CS) CS%id_e_preale = register_diag_field('ocean_model', 'e_preale', diag%axesTi, Time, & 'Interface Heights before remapping', 'm', conversion=US%Z_to_m) - CS%id_dzRegrid = register_diag_field('ocean_model','dzRegrid',diag%axesTi,Time, & + CS%id_dzRegrid = register_diag_field('ocean_model', 'dzRegrid', diag%axesTi, Time, & 'Change in interface height due to ALE regridding', 'm', conversion=GV%H_to_m) - cs%id_vert_remap_h = register_diag_field('ocean_model', 'vert_remap_h', & - diag%axestl, time, 'layer thicknesses after ALE regridding and remapping', & - 'm', conversion=GV%H_to_m, v_extensive=.true.) - cs%id_vert_remap_h_tendency = register_diag_field('ocean_model','vert_remap_h_tendency',diag%axestl,time, & + cs%id_vert_remap_h = register_diag_field('ocean_model', 'vert_remap_h', diag%axestl, Time, & + 'layer thicknesses after ALE regridding and remapping', & + thickness_units, conversion=GV%H_to_MKS, v_extensive=.true.) + cs%id_vert_remap_h_tendency = register_diag_field('ocean_model', & + 'vert_remap_h_tendency', diag%axestl, Time, & 'Layer thicknesses tendency due to ALE regridding and remapping', & - 'm s-1', conversion=GV%H_to_m*US%s_to_T, v_extensive=.true.) + trim(thickness_units)//" s-1", conversion=GV%H_to_MKS*US%s_to_T, v_extensive=.true.) 
end subroutine ALE_register_diags diff --git a/src/diagnostics/MOM_diagnostics.F90 b/src/diagnostics/MOM_diagnostics.F90 index 52546dd366..e1c4f19083 100644 --- a/src/diagnostics/MOM_diagnostics.F90 +++ b/src/diagnostics/MOM_diagnostics.F90 @@ -31,7 +31,7 @@ module MOM_diagnostics use MOM_unit_scaling, only : unit_scale_type use MOM_variables, only : thermo_var_ptrs, ocean_internal_state, p3d use MOM_variables, only : accel_diag_ptrs, cont_diag_ptrs, surface -use MOM_verticalGrid, only : verticalGrid_type, get_thickness_units +use MOM_verticalGrid, only : verticalGrid_type, get_thickness_units, get_flux_units use MOM_wave_speed, only : wave_speed, wave_speed_CS, wave_speed_init implicit none ; private @@ -1593,11 +1593,9 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag "forms of the same expressions.", default=default_2018_answers) call get_param(param_file, mdl, "SPLIT", split, default=.true., do_not_log=.true.) - if (GV%Boussinesq) then - thickness_units = "m" ; flux_units = "m3 s-1" ; convert_H = GV%H_to_m - else - thickness_units = "kg m-2" ; flux_units = "kg s-1" ; convert_H = GV%H_to_kg_m2 - endif + thickness_units = get_thickness_units(GV) + flux_units = get_flux_units(GV) + convert_H = GV%H_to_MKS CS%id_masscello = register_diag_field('ocean_model', 'masscello', diag%axesTL,& Time, 'Mass per unit area of liquid ocean grid cell', 'kg m-2', & !### , conversion=GV%H_to_kg_m2, & @@ -1607,11 +1605,11 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag diag, 'Mass of liquid ocean', 'kg', standard_name='sea_water_mass') CS%id_thkcello = register_diag_field('ocean_model', 'thkcello', diag%axesTL, Time, & - long_name = 'Cell Thickness', standard_name='cell_thickness', & + long_name='Cell Thickness', standard_name='cell_thickness', & units='m', conversion=US%Z_to_m, v_extensive=.true.) CS%id_h_pre_sync = register_diag_field('ocean_model', 'h_pre_sync', diag%axesTL, Time, & - long_name = 'Cell thickness from the previous timestep', & - units='m', conversion=GV%H_to_m, v_extensive=.true.) + long_name='Cell thickness from the previous timestep', & + units=thickness_units, conversion=GV%H_to_MKS, v_extensive=.true.) ! Note that CS%id_volcello would normally be registered here but because it is a "cell measure" and ! must be registered first. We earlier stored the handle of volcello but need it here for posting @@ -1948,10 +1946,11 @@ subroutine register_transport_diags(Time, G, GV, US, IDs, diag) character(len=48) :: thickness_units, accum_flux_units thickness_units = get_thickness_units(GV) + H_convert = GV%H_to_MKS if (GV%Boussinesq) then - H_convert = GV%H_to_m ; accum_flux_units = "m3" + accum_flux_units = "m3" else - H_convert = GV%H_to_kg_m2 ; accum_flux_units = "kg" + accum_flux_units = "kg" endif ! Diagnostics related to tracer and mass transport @@ -1979,10 +1978,10 @@ subroutine register_transport_diags(Time, G, GV, US, IDs, diag) standard_name='ocean_mass_y_transport_vertical_sum', x_cell_method='sum') IDs%id_dynamics_h = register_diag_field('ocean_model','dynamics_h', & diag%axesTl, Time, 'Layer thicknesses prior to horizontal dynamics', & - 'm', v_extensive=.true., conversion=GV%H_to_m) + thickness_units, conversion=GV%H_to_MKS, v_extensive=.true.) 
IDs%id_dynamics_h_tendency = register_diag_field('ocean_model','dynamics_h_tendency', & diag%axesTl, Time, 'Change in layer thicknesses due to horizontal dynamics', & - 'm s-1', v_extensive=.true., conversion=GV%H_to_m*US%s_to_T) + trim(thickness_units)//" s-1", conversion=GV%H_to_MKS*US%s_to_T, v_extensive=.true.) end subroutine register_transport_diags diff --git a/src/parameterizations/vertical/MOM_diabatic_driver.F90 b/src/parameterizations/vertical/MOM_diabatic_driver.F90 index 32857da1c4..278fb1ddda 100644 --- a/src/parameterizations/vertical/MOM_diabatic_driver.F90 +++ b/src/parameterizations/vertical/MOM_diabatic_driver.F90 @@ -3253,8 +3253,9 @@ subroutine diabatic_driver_init(Time, G, GV, US, param_file, useALEalgorithm, di ! available only for ALE algorithm. ! diagnostics for tendencies of temp and heat due to frazil CS%id_diabatic_diff_h = register_diag_field('ocean_model', 'diabatic_diff_h', diag%axesTL, Time, & - long_name='Cell thickness used during diabatic diffusion', & - units='m', conversion=GV%H_to_m, v_extensive=.true.) + 'Cell thickness used during diabatic diffusion', & + thickness_units, conversion=GV%H_to_MKS, v_extensive=.true.) + if (CS%useALEalgorithm) then CS%id_diabatic_diff_temp_tend = register_diag_field('ocean_model', & 'diabatic_diff_temp_tendency', diag%axesTL, Time, & @@ -3326,12 +3327,12 @@ subroutine diabatic_driver_init(Time, G, GV, US, param_file, useALEalgorithm, di ! available only for ALE algorithm. ! diagnostics for tendencies of temp and heat due to frazil CS%id_boundary_forcing_h = register_diag_field('ocean_model', 'boundary_forcing_h', diag%axesTL, Time, & - long_name='Cell thickness after applying boundary forcing', & - units='m', conversion=GV%H_to_m, v_extensive=.true.) + 'Cell thickness after applying boundary forcing', & + thickness_units, conversion=GV%H_to_MKS, v_extensive=.true.) CS%id_boundary_forcing_h_tendency = register_diag_field('ocean_model', & 'boundary_forcing_h_tendency', diag%axesTL, Time, & 'Cell thickness tendency due to boundary forcing', & - 'm s-1', conversion=GV%H_to_m*US%s_to_T, v_extensive=.true.) + trim(thickness_units)//" s-1", conversion=GV%H_to_MKS*US%s_to_T, v_extensive=.true.) if (CS%id_boundary_forcing_h_tendency > 0) then CS%boundary_forcing_tendency_diag = .true. endif @@ -3388,7 +3389,7 @@ subroutine diabatic_driver_init(Time, G, GV, US, param_file, useALEalgorithm, di ! diagnostics for tendencies of temp and heat due to frazil CS%id_frazil_h = register_diag_field('ocean_model', 'frazil_h', diag%axesTL, Time, & long_name='Cell Thickness', standard_name='cell_thickness', & - units='m', conversion=GV%H_to_m, v_extensive=.true.) + units=thickness_units, conversion=GV%H_to_MKS, v_extensive=.true.) ! 
diagnostic for tendency of temp due to frazil CS%id_frazil_temp_tend = register_diag_field('ocean_model',& diff --git a/src/tracer/MOM_offline_main.F90 b/src/tracer/MOM_offline_main.F90 index 31b7b29445..c1582dca4a 100644 --- a/src/tracer/MOM_offline_main.F90 +++ b/src/tracer/MOM_offline_main.F90 @@ -35,7 +35,7 @@ module MOM_offline_main use MOM_tracer_registry, only : tracer_registry_type, MOM_tracer_chksum, MOM_tracer_chkinv use MOM_unit_scaling, only : unit_scale_type use MOM_variables, only : thermo_var_ptrs -use MOM_verticalGrid, only : verticalGrid_type +use MOM_verticalGrid, only : verticalGrid_type, get_thickness_units implicit none ; private @@ -1160,7 +1160,7 @@ subroutine register_diags_offline_transport(Time, diag, CS, GV, US) 'at the end of the offline timestep', 'm', conversion=GV%H_to_m) CS%id_h_redist = register_diag_field('ocean_model','h_redist', diag%axesTL, Time, & 'Layer thicknesses before redistribution of mass fluxes', & - 'm', conversion=GV%H_to_m) + get_thickness_units(GV), conversion=GV%H_to_MKS) ! Regridded/remapped input fields CS%id_uhtr_regrid = register_diag_field('ocean_model', 'uhtr_regrid', diag%axesCuL, Time, & From 50131c66c95ad5b54ff217143b557b6697e4913d Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Fri, 29 Jul 2022 12:02:19 -0400 Subject: [PATCH 19/40] +Add answer_date optional arguments Added optional answer_date arguments to various remapping routines. These are vintage-encoding integers intended to replace the logical answers_2018 arguments, and allow for multiple generations of improved algorithms rather than just two choices, without requiring added interface changes. However, this change is backward compatible, and these two arguments are both offered for now. All answers are bitwise identical, but there are new optional arguments to numerous publicly visible routines. --- src/ALE/P1M_functions.F90 | 6 ++- src/ALE/P3M_functions.F90 | 11 ++++-- src/ALE/PPM_functions.F90 | 10 +++-- src/ALE/PQM_functions.F90 | 10 +++-- src/ALE/regrid_edge_values.F90 | 25 ++++++++++--- src/ALE/regrid_solvers.F90 | 8 +++- src/framework/MOM_horizontal_regridding.F90 | 41 ++++++++++++++------- 7 files changed, 76 insertions(+), 35 deletions(-) diff --git a/src/ALE/P1M_functions.F90 b/src/ALE/P1M_functions.F90 index d99c611229..281971cca4 100644 --- a/src/ALE/P1M_functions.F90 +++ b/src/ALE/P1M_functions.F90 @@ -24,7 +24,7 @@ module P1M_functions !! !! It is assumed that the size of the array 'u' is equal to the number of cells !! defining 'grid' and 'ppoly'. No consistency check is performed here. -subroutine P1M_interpolation( N, h, u, edge_values, ppoly_coef, h_neglect, answers_2018 ) +subroutine P1M_interpolation( N, h, u, edge_values, ppoly_coef, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell average properties (size N) [A] @@ -33,13 +33,15 @@ subroutine P1M_interpolation( N, h, u, edge_values, ppoly_coef, h_neglect, answe !! piecewise polynomial coefficients, mainly [A] real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables integer :: k ! loop index real :: u0_l, u0_r ! edge values (left and right) ! 
Bound edge values (routine found in 'edge_values.F90') - call bound_edge_values( N, h, u, edge_values, h_neglect, answers_2018 ) + call bound_edge_values( N, h, u, edge_values, h_neglect, & + answers_2018=answers_2018, answer_date=answer_date ) ! Systematically average discontinuous edge values (routine found in ! 'edge_values.F90') diff --git a/src/ALE/P3M_functions.F90 b/src/ALE/P3M_functions.F90 index e3a9f75a3c..4d39542337 100644 --- a/src/ALE/P3M_functions.F90 +++ b/src/ALE/P3M_functions.F90 @@ -25,7 +25,7 @@ module P3M_functions !! !! It is assumed that the size of the array 'u' is equal to the number of cells !! defining 'grid' and 'ppoly'. No consistency check is performed here. -subroutine P3M_interpolation( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answers_2018 ) +subroutine P3M_interpolation( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell averages (size N) in arbitrary units [A] @@ -35,13 +35,15 @@ subroutine P3M_interpolation( N, h, u, edge_values, ppoly_S, ppoly_coef, h_negle real, optional, intent(in) :: h_neglect !< A negligibly small width for the !! purpose of cell reconstructions [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Call the limiter for p3m, which takes care of everything from ! computing the coefficients of the cubic to monotonizing it. ! This routine could be called directly instead of having to call ! 'P3M_interpolation' first but we do that to provide an homogeneous ! interface. - call P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answers_2018 ) + call P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, & + answers_2018=answers_2018, answer_date=answer_date ) end subroutine P3M_interpolation @@ -58,7 +60,7 @@ end subroutine P3M_interpolation !! c. If not, monotonize cubic curve and rebuild it !! !! Step 3 of the monotonization process leaves all edge values unchanged. -subroutine P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answers_2018 ) +subroutine P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell averages (size N) in arbitrary units [A] @@ -68,6 +70,7 @@ subroutine P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, an real, optional, intent(in) :: h_neglect !< A negligibly small width for !! the purpose of cell reconstructions [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables integer :: k ! loop index @@ -86,7 +89,7 @@ subroutine P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, an eps = 1e-10 ! 1. Bound edge values (boundary cells are assumed to be local extrema) - call bound_edge_values( N, h, u, edge_values, hNeglect, answers_2018 ) + call bound_edge_values( N, h, u, edge_values, hNeglect, answers_2018=answers_2018, answer_date=answer_date ) ! 2. 
Systematically average discontinuous edge values call average_discontinuous_edge_values( N, edge_values ) diff --git a/src/ALE/PPM_functions.F90 b/src/ALE/PPM_functions.F90 index bbf93b4a81..16441565ac 100644 --- a/src/ALE/PPM_functions.F90 +++ b/src/ALE/PPM_functions.F90 @@ -25,7 +25,7 @@ module PPM_functions contains !> Builds quadratic polynomials coefficients from cell mean and edge values. -subroutine PPM_reconstruction( N, h, u, edge_values, ppoly_coef, h_neglect, answers_2018) +subroutine PPM_reconstruction( N, h, u, edge_values, ppoly_coef, h_neglect, answers_2018, answer_date) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< Cell widths [H] real, dimension(N), intent(in) :: u !< Cell averages [A] @@ -33,13 +33,14 @@ subroutine PPM_reconstruction( N, h, u, edge_values, ppoly_coef, h_neglect, answ real, dimension(N,3), intent(inout) :: ppoly_coef !< Polynomial coefficients, mainly [A] real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables integer :: k ! Loop index real :: edge_l, edge_r ! Edge values (left and right) ! PPM limiter - call PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018 ) + call PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018=answers_2018, answer_date=answer_date ) ! Loop over all cells do k = 1,N @@ -59,13 +60,14 @@ end subroutine PPM_reconstruction !> Adjusts edge values using the standard PPM limiter (Colella & Woodward, JCP 1984) !! after first checking that the edge values are bounded by neighbors cell averages !! and that the edge values are monotonic between cell averages. -subroutine PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018 ) +subroutine PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell average properties (size N) [A] real, dimension(:,:), intent(inout) :: edge_values !< Potentially modified edge values [A] real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables integer :: k ! Loop index @@ -74,7 +76,7 @@ subroutine PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018 ) real :: expr1, expr2 ! Bound edge values - call bound_edge_values( N, h, u, edge_values, h_neglect, answers_2018 ) + call bound_edge_values( N, h, u, edge_values, h_neglect, answers_2018=answers_2018, answer_date=answer_date ) ! Make discontinuous edge values monotonic call check_discontinuous_edge_values( N, u, edge_values ) diff --git a/src/ALE/PQM_functions.F90 b/src/ALE/PQM_functions.F90 index 630ecb34fc..d3809a5d1c 100644 --- a/src/ALE/PQM_functions.F90 +++ b/src/ALE/PQM_functions.F90 @@ -17,7 +17,7 @@ module PQM_functions !! !! It is assumed that the dimension of 'u' is equal to the number of cells !! defining 'grid' and 'ppoly'. No consistency check is performed. 
-subroutine PQM_reconstruction( N, h, u, edge_values, edge_slopes, ppoly_coef, h_neglect, answers_2018 ) +subroutine PQM_reconstruction( N, h, u, edge_values, edge_slopes, ppoly_coef, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell averages (size N) [A] @@ -27,6 +27,7 @@ subroutine PQM_reconstruction( N, h, u, edge_values, edge_slopes, ppoly_coef, h_ real, optional, intent(in) :: h_neglect !< A negligibly small width for !! the purpose of cell reconstructions [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables integer :: k ! loop index @@ -36,7 +37,7 @@ subroutine PQM_reconstruction( N, h, u, edge_values, edge_slopes, ppoly_coef, h_ real :: a, b, c, d, e ! parabola coefficients ! PQM limiter - call PQM_limiter( N, h, u, edge_values, edge_slopes, h_neglect, answers_2018 ) + call PQM_limiter( N, h, u, edge_values, edge_slopes, h_neglect, answers_2018=answers_2018, answer_date=answer_date ) ! Loop on cells to construct the cubic within each cell do k = 1,N @@ -72,7 +73,7 @@ end subroutine PQM_reconstruction !! !! It is assumed that the dimension of 'u' is equal to the number of cells !! defining 'grid' and 'ppoly'. No consistency check is performed. -subroutine PQM_limiter( N, h, u, edge_values, edge_slopes, h_neglect, answers_2018 ) +subroutine PQM_limiter( N, h, u, edge_values, edge_slopes, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell average properties (size N) [A] @@ -81,6 +82,7 @@ subroutine PQM_limiter( N, h, u, edge_values, edge_slopes, h_neglect, answers_20 real, optional, intent(in) :: h_neglect !< A negligibly small width for !! the purpose of cell reconstructions [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables integer :: k ! loop index @@ -102,7 +104,7 @@ subroutine PQM_limiter( N, h, u, edge_values, edge_slopes, h_neglect, answers_20 hNeglect = hNeglect_dflt ; if (present(h_neglect)) hNeglect = h_neglect ! Bound edge values - call bound_edge_values( N, h, u, edge_values, hNeglect, answers_2018 ) + call bound_edge_values( N, h, u, edge_values, hNeglect, answers_2018=answers_2018, answer_date=answer_date ) ! Make discontinuous edge values monotonic (thru averaging) call check_discontinuous_edge_values( N, u, edge_values ) diff --git a/src/ALE/regrid_edge_values.F90 b/src/ALE/regrid_edge_values.F90 index a972fc3444..08425fc92d 100644 --- a/src/ALE/regrid_edge_values.F90 +++ b/src/ALE/regrid_edge_values.F90 @@ -41,7 +41,7 @@ module regrid_edge_values !! Both boundary edge values are set equal to the boundary cell averages. !! Any extrapolation scheme is applied after this routine has been called. !! Therefore, boundary cells are treated as if they were local extrama. 
-subroutine bound_edge_values( N, h, u, edge_val, h_neglect, answers_2018 ) +subroutine bound_edge_values( N, h, u, edge_val, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] @@ -49,6 +49,8 @@ subroutine bound_edge_values( N, h, u, edge_val, h_neglect, answers_2018 ) !! second index is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use + ! Local variables real :: sigma_l, sigma_c, sigma_r ! left, center and right van Leer slopes [A H-1] or [A] real :: slope_x_h ! retained PLM slope times half grid step [A] @@ -57,6 +59,7 @@ subroutine bound_edge_values( N, h, u, edge_val, h_neglect, answers_2018 ) integer :: k, km1, kp1 ! Loop index and the values to either side. use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 + if (present(answer_date)) use_2018_answers = (answer_date < 20190101) if (use_2018_answers) then hNeglect = hNeglect_dflt ; if (present(h_neglect)) hNeglect = h_neglect endif @@ -218,7 +221,7 @@ end subroutine edge_values_explicit_h2 !! available interpolant. !! !! For this fourth-order scheme, at least four cells must exist. -subroutine edge_values_explicit_h4( N, h, u, edge_val, h_neglect, answers_2018 ) +subroutine edge_values_explicit_h4( N, h, u, edge_val, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] @@ -226,6 +229,7 @@ subroutine edge_values_explicit_h4( N, h, u, edge_val, h_neglect, answers_2018 ) !! is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables real :: h0, h1, h2, h3 ! temporary thicknesses [H] @@ -247,6 +251,7 @@ subroutine edge_values_explicit_h4( N, h, u, edge_val, h_neglect, answers_2018 ) logical :: use_2018_answers ! If true use older, less acccurate expressions. use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 + if (present(answer_date)) use_2018_answers = (answer_date < 20190101) if (use_2018_answers) then hNeglect = hNeglect_edge_dflt ; if (present(h_neglect)) hNeglect = h_neglect else @@ -382,7 +387,7 @@ end subroutine edge_values_explicit_h4 !! !! There are N+1 unknowns and we are able to write N-1 equations. The !! boundary conditions close the system. -subroutine edge_values_implicit_h4( N, h, u, edge_val, h_neglect, answers_2018 ) +subroutine edge_values_implicit_h4( N, h, u, edge_val, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] @@ -390,6 +395,7 @@ subroutine edge_values_implicit_h4( N, h, u, edge_val, h_neglect, answers_2018 ) !! is for the two edges of each cell. 
real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables integer :: i, j ! loop indexes @@ -418,6 +424,7 @@ subroutine edge_values_implicit_h4( N, h, u, edge_val, h_neglect, answers_2018 ) logical :: use_2018_answers ! If true use older, less acccurate expressions. use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 + if (present(answer_date)) use_2018_answers = (answer_date < 20190101) if (use_2018_answers) then hNeglect = hNeglect_edge_dflt ; if (present(h_neglect)) hNeglect = h_neglect else @@ -690,7 +697,7 @@ end subroutine end_value_h4 !! !! There are N+1 unknowns and we are able to write N-1 equations. The !! boundary conditions close the system. -subroutine edge_slopes_implicit_h3( N, h, u, edge_slopes, h_neglect, answers_2018 ) +subroutine edge_slopes_implicit_h3( N, h, u, edge_slopes, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] @@ -698,6 +705,8 @@ subroutine edge_slopes_implicit_h3( N, h, u, edge_slopes, h_neglect, answers_201 !! second index is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use + ! Local variables integer :: i, j ! loop indexes real :: h0, h1 ! cell widths [H or nondim] @@ -729,6 +738,7 @@ subroutine edge_slopes_implicit_h3( N, h, u, edge_slopes, h_neglect, answers_201 hNeglect = hNeglect_dflt ; if (present(h_neglect)) hNeglect = h_neglect hNeglect3 = hNeglect**3 use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 + if (present(answer_date)) use_2018_answers = (answer_date < 20190101) ! Loop on cells (except last one) do i = 1,N-1 @@ -859,7 +869,7 @@ end subroutine edge_slopes_implicit_h3 !------------------------------------------------------------------------------ !> Compute ih5 edge slopes (implicit fifth order accurate) -subroutine edge_slopes_implicit_h5( N, h, u, edge_slopes, h_neglect, answers_2018 ) +subroutine edge_slopes_implicit_h5( N, h, u, edge_slopes, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] @@ -867,6 +877,8 @@ subroutine edge_slopes_implicit_h5( N, h, u, edge_slopes, h_neglect, answers_201 !! second index is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use + ! ----------------------------------------------------------------------------- ! Fifth-order implicit estimates of edge slopes are based on a four-cell, ! three-edge stencil. A tridiagonal system is set up and is based on @@ -1129,7 +1141,7 @@ end subroutine edge_slopes_implicit_h5 !! become computationally expensive if regridding is carried out !! often. 
Figuring out closed-form expressions for these coefficients !! on nonuniform meshes turned out to be intractable. -subroutine edge_values_implicit_h6( N, h, u, edge_val, h_neglect, answers_2018 ) +subroutine edge_values_implicit_h6( N, h, u, edge_val, h_neglect, answers_2018, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties (size N) in arbitrary units [A] @@ -1137,6 +1149,7 @@ subroutine edge_values_implicit_h6( N, h, u, edge_val, h_neglect, answers_2018 ) !! is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables real :: h0, h1, h2, h3 ! cell widths [H] diff --git a/src/ALE/regrid_solvers.F90 b/src/ALE/regrid_solvers.F90 index b7cc3b5402..022946a29d 100644 --- a/src/ALE/regrid_solvers.F90 +++ b/src/ALE/regrid_solvers.F90 @@ -16,12 +16,13 @@ module regrid_solvers !! This routine uses Gauss's algorithm to transform the system's original !! matrix into an upper triangular matrix. Back substitution yields the answer. !! The matrix A must be square, with the first index varing down the column. -subroutine solve_linear_system( A, R, X, N, answers_2018 ) +subroutine solve_linear_system( A, R, X, N, answers_2018, answer_date ) integer, intent(in) :: N !< The size of the system real, dimension(N,N), intent(inout) :: A !< The matrix being inverted [nondim] real, dimension(N), intent(inout) :: R !< system right-hand side [A] real, dimension(N), intent(inout) :: X !< solution vector [A] logical, optional, intent(in) :: answers_2018 !< If true or absent use older, less efficient expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables real, parameter :: eps = 0.0 ! Minimum pivot magnitude allowed real :: factor ! The factor that eliminates the leading nonzero element in a row. @@ -32,6 +33,7 @@ subroutine solve_linear_system( A, R, X, N, answers_2018 ) integer :: i, j, k old_answers = .true. ; if (present(answers_2018)) old_answers = answers_2018 + if (present(answer_date)) old_answers = (answer_date < 20190101) ! Loop on rows to transform the problem into multiplication by an upper-right matrix. do i = 1,N-1 @@ -173,7 +175,7 @@ end subroutine linear_solver !! !! This routine uses Thomas's algorithm to solve the tridiagonal system AX = R. !! (A is made up of lower, middle and upper diagonals) -subroutine solve_tridiagonal_system( Al, Ad, Au, R, X, N, answers_2018 ) +subroutine solve_tridiagonal_system( Al, Ad, Au, R, X, N, answers_2018, answer_date ) integer, intent(in) :: N !< The size of the system real, dimension(N), intent(in) :: Ad !< Matrix center diagonal real, dimension(N), intent(in) :: Al !< Matrix lower diagonal @@ -181,6 +183,7 @@ subroutine solve_tridiagonal_system( Al, Ad, Au, R, X, N, answers_2018 ) real, dimension(N), intent(in) :: R !< system right-hand side real, dimension(N), intent(out) :: X !< solution vector logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables real, dimension(N) :: pivot, Al_piv real, dimension(N) :: c1 ! 
Au / pivot for the backward sweep @@ -189,6 +192,7 @@ subroutine solve_tridiagonal_system( Al, Ad, Au, R, X, N, answers_2018 ) logical :: old_answers ! If true, use expressions that give the original (2008 through 2018) MOM6 answers old_answers = .true. ; if (present(answers_2018)) old_answers = answers_2018 + if (present(answer_date)) old_answers = (answer_date < 20190101) if (old_answers) then ! This version gives the same answers as the original (2008 through 2018) MOM6 code diff --git a/src/framework/MOM_horizontal_regridding.F90 b/src/framework/MOM_horizontal_regridding.F90 index 05e3e393b6..bbb5ae0e15 100644 --- a/src/framework/MOM_horizontal_regridding.F90 +++ b/src/framework/MOM_horizontal_regridding.F90 @@ -81,7 +81,7 @@ end subroutine myStats !> Use ICE-9 algorithm to populate points (fill=1) with valid data (good=1). If no information !! is available, use a previous guess (prev). Optionally (smooth) blend the filled points to !! achieve a more desirable result. -subroutine fill_miss_2d(aout, good, fill, prev, G, acrit, num_pass, relc, debug, answers_2018) +subroutine fill_miss_2d(aout, good, fill, prev, G, acrit, num_pass, relc, debug, answer_date) type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure. real, dimension(SZI_(G),SZJ_(G)), & intent(inout) :: aout !< The array with missing values to fill [A] @@ -98,8 +98,9 @@ subroutine fill_miss_2d(aout, good, fill, prev, G, acrit, num_pass, relc, debug, integer, optional, intent(in) :: num_pass !< The maximum number of iterations real, optional, intent(in) :: relc !< A relaxation coefficient for Laplacian [nondim] logical, optional, intent(in) :: debug !< If true, write verbose debugging messages. - logical, optional, intent(in) :: answers_2018 !< If true, use expressions that give the same - !! answers as the code did in late 2018. Otherwise + integer, optional, intent(in) :: answer_date !< The vintage of the expressions in the code. + !! Dates before 20190101 give the same answers + !! as the code did in late 2018, while later versions !! add parentheses for rotational symmetry. real, dimension(SZI_(G),SZJ_(G)) :: a_filled ! The aout with missing values filled in [A] @@ -135,7 +136,7 @@ subroutine fill_miss_2d(aout, good, fill, prev, G, acrit, num_pass, relc, debug, relax_coeff = relc_default if (PRESENT(relc)) relax_coeff = relc - ans_2018 = .true. ; if (PRESENT(answers_2018)) ans_2018 = answers_2018 + ans_2018 = .true. ; if (PRESENT(answer_date)) ans_2018 = (answer_date < 20190101) fill_pts(:,:) = fill(:,:) @@ -251,7 +252,7 @@ end subroutine fill_miss_2d !> Extrapolate and interpolate from a file record subroutine horiz_interp_and_extrap_tracer_record(filename, varnam, conversion, recnum, G, tr_z, mask_z, & z_in, z_edges_in, missing_value, reentrant_x, tripolar_n, & - homogenize, m_to_Z, answers_2018, ongrid, tr_iter_tol) + homogenize, m_to_Z, answers_2018, ongrid, tr_iter_tol, answer_date) character(len=*), intent(in) :: filename !< Path to file containing tracer to be !! interpolated. @@ -287,6 +288,10 @@ subroutine horiz_interp_and_extrap_tracer_record(filename, varnam, conversion, real, optional, intent(in) :: tr_iter_tol !< The tolerance for changes in tracer concentrations !! between smoothing iterations that determines when to !! stop iterating [CU ~> conc] + integer, optional, intent(in) :: answer_date !< The vintage of the expressions in the code. + !! Dates before 20190101 give the same answers + !! as the code did in late 2018, while later versions + !! add parentheses for rotational symmetry. ! 
Local variables real, dimension(:,:), allocatable :: tr_in !< A 2-d array for holding input data on its @@ -313,6 +318,7 @@ subroutine horiz_interp_and_extrap_tracer_record(filename, varnam, conversion, real :: missing_val_in ! The missing value in the input field [conc] real :: roundoff ! The magnitude of roundoff, usually ~2e-16 [nondim] real :: add_offset, scale_factor ! File-specific conversion factors. + integer :: ans_date ! The vintage of the expressions and order of arithmetic to use logical :: found_attr logical :: add_np logical :: is_ongrid @@ -356,6 +362,10 @@ subroutine horiz_interp_and_extrap_tracer_record(filename, varnam, conversion, PI_180 = atan(1.0)/45. + ans_date = 20181231 + if (present(answers_2018)) then ; if (.not.answers_2018) ans_date = 20190101 ; endif + if (present(answer_date)) ans_date = answer_date + ! Open NetCDF file and if present, extract data and spatial coordinate information ! The convention adopted here requires that the data be written in (i,j,k) ordering. @@ -565,8 +575,7 @@ subroutine horiz_interp_and_extrap_tracer_record(filename, varnam, conversion, good2(:,:) = good(:,:) fill2(:,:) = fill(:,:) - call fill_miss_2d(tr_outf, good2, fill2, tr_prev, G, dtr_iter_stop, & - answers_2018=answers_2018) + call fill_miss_2d(tr_outf, good2, fill2, tr_prev, G, dtr_iter_stop, answer_date=ans_date) if (debug) then call myStats(tr_outf, missing_value, is, ie, js, je, k, 'field from fill_miss_2d()', scale=I_scale) endif @@ -589,7 +598,8 @@ end subroutine horiz_interp_and_extrap_tracer_record !> Extrapolate and interpolate using a FMS time interpolation handle subroutine horiz_interp_and_extrap_tracer_fms_id(fms_id, Time, conversion, G, tr_z, mask_z, & z_in, z_edges_in, missing_value, reentrant_x, tripolar_n, & - homogenize, spongeOngrid, m_to_Z, answers_2018, tr_iter_tol) + homogenize, spongeOngrid, m_to_Z, & + answers_2018, tr_iter_tol, answer_date) integer, intent(in) :: fms_id !< A unique id used by the FMS time interpolator type(time_type), intent(in) :: Time !< A FMS time type @@ -621,6 +631,10 @@ subroutine horiz_interp_and_extrap_tracer_fms_id(fms_id, Time, conversion, G, t real, optional, intent(in) :: tr_iter_tol !< The tolerance for changes in tracer concentrations !! between smoothing iterations that determines when to !! stop iterating [CU ~> conc] + integer, optional, intent(in) :: answer_date !< The vintage of the expressions in the code. + !! Dates before 20190101 give the same answers + !! as the code did in late 2018, while later versions + !! add parentheses for rotational symmetry. ! Local variables real, dimension(:,:), allocatable :: tr_in !< A 2-d array for holding input data on its @@ -658,7 +672,7 @@ subroutine horiz_interp_and_extrap_tracer_fms_id(fms_id, Time, conversion, G, t integer, dimension(4) :: fld_sz logical :: debug=.false. logical :: is_ongrid - logical :: ans_2018 + integer :: ans_date ! The vintage of the expressions and order of arithmetic to use real :: I_scale ! The inverse of the conversion factor for diagnostic output [conc CU-1 ~> 1] real :: dtr_iter_stop ! The tolerance for changes in tracer concentrations between smoothing ! iterations that determines when to stop iterating [CU ~> conc] @@ -692,7 +706,9 @@ subroutine horiz_interp_and_extrap_tracer_fms_id(fms_id, Time, conversion, G, t PI_180 = atan(1.0)/45. 
- ans_2018 = .true.;if (present(answers_2018)) ans_2018 = answers_2018 + ans_date = 20181231 + if (present(answers_2018)) then ; if (.not.answers_2018) ans_date = 20190101 ; endif + if (present(answer_date)) ans_date = answer_date ! Open NetCDF file and if present, extract data and spatial coordinate information ! The convention adopted here requires that the data be written in (i,j,k) ordering. @@ -872,8 +888,7 @@ subroutine horiz_interp_and_extrap_tracer_fms_id(fms_id, Time, conversion, G, t good2(:,:) = good(:,:) fill2(:,:) = fill(:,:) - call fill_miss_2d(tr_outf, good2, fill2, tr_prev, G, dtr_iter_stop, & - answers_2018=answers_2018) + call fill_miss_2d(tr_outf, good2, fill2, tr_prev, G, dtr_iter_stop, answer_date=ans_date) ! if (debug) then ! call hchksum(tr_outf, 'field from fill_miss_2d ', G%HI, scale=I_scale) @@ -895,7 +910,7 @@ subroutine horiz_interp_and_extrap_tracer_fms_id(fms_id, Time, conversion, G, t do j=js,je do i=is,ie tr_z(i,j,k) = data_in(i,j,k) * conversion - if (.not. ans_2018) mask_z(i,j,k) = 1. + if (ans_date >= 20190101) mask_z(i,j,k) = 1. if (abs(tr_z(i,j,k)-missing_value) < abs(roundoff*missing_value)) mask_z(i,j,k) = 0. enddo enddo From def5f1457dae54338de073299b728a1164698716 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Fri, 29 Jul 2022 16:04:13 -0400 Subject: [PATCH 20/40] +Use answer_date to specify remapping in ALE Replace the answers_2018 arguments with answer_date arguments to specify the version of expressions in a number of calls from the upper-level ALE modules, while also adding new answer_date optional arguments to several of the publicly visible remapping routines, including ALE_remap_scalar, regrid_set_params and remapping_set_params. The routine interpolate_grid, which is not called from outside of the regrid_interp module, is no longer being made publicly visible. Some comments noting parameters that are not guaranteed to be externally set or that can not be reset were also added. All answers are bitwise identical. --- src/ALE/MOM_ALE.F90 | 60 +++++++++++++++++++---------- src/ALE/MOM_regridding.F90 | 36 ++++++++++++------ src/ALE/MOM_remapping.F90 | 69 +++++++++++++++++++-------------- src/ALE/regrid_interp.F90 | 78 +++++++++++++++++++------------------- 4 files changed, 143 insertions(+), 100 deletions(-) diff --git a/src/ALE/MOM_ALE.F90 b/src/ALE/MOM_ALE.F90 index 293817e24f..e614391b4a 100644 --- a/src/ALE/MOM_ALE.F90 +++ b/src/ALE/MOM_ALE.F90 @@ -91,9 +91,11 @@ module MOM_ALE logical :: remap_after_initialization !< Indicates whether to regrid/remap after initializing the state. - logical :: answers_2018 !< If true, use the order of arithmetic and expressions for remapping - !! that recover the answers from the end of 2018. Otherwise, use more - !! robust and accurate forms of mathematically equivalent expressions. + integer :: answer_date !< The vintage of the expressions and order of arithmetic to use for + !! remapping. Values below 20190101 result in the use of older, less + !! accurate expressions that were in use at the end of 2018. Higher + !! values result inthe use of more robust and accurate forms of + !! mathematically equivalent expressions. logical :: debug !< If true, write verbose checksums for debugging purposes. logical :: show_call_tree !< For debugging @@ -163,7 +165,11 @@ subroutine ALE_init( param_file, GV, US, max_depth, CS) character(len=40) :: mdl = "MOM_ALE" ! This module's name. character(len=80) :: string, vel_string ! 
Temporary strings real :: filter_shallow_depth, filter_deep_depth - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use the order of arithmetic and expressions for remapping + ! that recover the answers from the end of 2018. Otherwise, use more + ! robust and accurate forms of mathematically equivalent expressions. logical :: check_reconstruction logical :: check_remapping logical :: force_bounds_in_subcell @@ -218,25 +224,33 @@ subroutine ALE_init( param_file, GV, US, max_depth, CS) call get_param(param_file, mdl, "REMAP_BOUNDARY_EXTRAP", remap_boundary_extrap, & "If true, values at the interfaces of boundary cells are "//& "extrapolated instead of piecewise constant", default=.false.) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + if (answers_2018) then + CS%answer_date = 20181231 + else + CS%answer_date = 20190101 + endif call initialize_remapping( CS%remapCS, string, & boundary_extrapolation=remap_boundary_extrap, & check_reconstruction=check_reconstruction, & check_remapping=check_remapping, & force_bounds_in_subcell=force_bounds_in_subcell, & - answers_2018=CS%answers_2018) + answer_date=CS%answer_date) call initialize_remapping( CS%vel_remapCS, vel_string, & boundary_extrapolation=remap_boundary_extrap, & check_reconstruction=check_reconstruction, & check_remapping=check_remapping, & force_bounds_in_subcell=force_bounds_in_subcell, & - answers_2018=CS%answers_2018) + answer_date=CS%answer_date) call get_param(param_file, mdl, "PARTIAL_CELL_VELOCITY_REMAP", CS%partial_cell_vel_remap, & "If true, use partial cell thicknesses at velocity points that are masked out "//& @@ -595,8 +609,8 @@ subroutine ALE_offline_inputs(CS, G, GV, h, tv, Reg, uhtr, vhtr, Kd, debug, OBC) endif enddo ; enddo - call ALE_remap_scalar(CS%remapCS, G, GV, nk, h, tv%T, h_new, tv%T, answers_2018=CS%answers_2018) - call ALE_remap_scalar(CS%remapCS, G, GV, nk, h, tv%S, h_new, tv%S, answers_2018=CS%answers_2018) + call ALE_remap_scalar(CS%remapCS, G, GV, nk, h, tv%T, h_new, tv%T, answer_date=CS%answer_date) + call ALE_remap_scalar(CS%remapCS, G, GV, nk, h, tv%S, h_new, tv%S, answer_date=CS%answer_date) if (debug) call MOM_tracer_chkinv("After ALE_offline_inputs", G, GV, h_new, Reg%Tr, Reg%ntr) @@ -747,7 +761,7 @@ subroutine ALE_regrid_accelerated(CS, G, GV, h, tv, n_itt, u, v, OBC, Reg, dt, d if (present(dt)) & call ALE_update_regrid_weights(dt, CS) - if (.not. 
CS%answers_2018) then + if (CS%answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H * 1.0e-30 ; h_neglect_edge = GV%m_to_H * 1.0e-10 @@ -848,7 +862,7 @@ subroutine remap_all_state_vars(CS, G, GV, h_old, h_new, Reg, OBC, & "and u/v are to be remapped") endif - if (.not.CS%answers_2018) then + if (CS%answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 @@ -1097,7 +1111,8 @@ end subroutine mask_near_bottom_vel !> Remaps a single scalar between grids described by thicknesses h_src and h_dst. !! h_dst must be dimensioned as a model array with GV%ke layers while h_src can !! have an arbitrary number of layers specified by nk_src. -subroutine ALE_remap_scalar(CS, G, GV, nk_src, h_src, s_src, h_dst, s_dst, all_cells, old_remap, answers_2018 ) +subroutine ALE_remap_scalar(CS, G, GV, nk_src, h_src, s_src, h_dst, s_dst, all_cells, old_remap, & + answers_2018, answer_date ) type(remapping_CS), intent(in) :: CS !< Remapping control structure type(ocean_grid_type), intent(in) :: G !< Ocean grid structure type(verticalGrid_type), intent(in) :: GV !< Ocean vertical grid structure @@ -1117,6 +1132,8 @@ subroutine ALE_remap_scalar(CS, G, GV, nk_src, h_src, s_src, h_dst, s_dst, all_c !! and expressions that recover the answers for !! remapping from the end of 2018. Otherwise, !! use more robust forms of the same expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use + !! for remapping ! Local variables integer :: i, j, k, n_points real :: dx(GV%ke+1) @@ -1129,6 +1146,7 @@ subroutine ALE_remap_scalar(CS, G, GV, nk_src, h_src, s_src, h_dst, s_dst, all_c if (present(old_remap)) use_remapping_core_w = old_remap n_points = nk_src use_2018_remap = .true. ; if (present(answers_2018)) use_2018_remap = answers_2018 + if (present(answer_date)) use_2018_remap = (answer_date < 20190101) if (.not.use_2018_remap) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff @@ -1211,7 +1229,7 @@ subroutine ALE_PLM_edge_values( CS, G, GV, h, Q, bdry_extrap, Q_t, Q_b ) real :: mslp real :: h_neglect - if (.not.CS%answers_2018) then + if (CS%answer_date >= 20190101) then h_neglect = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 @@ -1280,7 +1298,7 @@ subroutine TS_PPM_edge_values( CS, S_t, S_b, T_t, T_b, G, GV, tv, h, bdry_extrap ppol_coefs ! Coefficients of polynomial, all in [degC] or [ppt] real :: h_neglect, h_neglect_edge ! 
Tiny thicknesses [H ~> m or kg m-2] - if (.not.CS%answers_2018) then + if (CS%answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 @@ -1300,9 +1318,9 @@ subroutine TS_PPM_edge_values( CS, S_t, S_b, T_t, T_b, G, GV, tv, h, bdry_extrap ppol_E(:,:) = 0.0 ppol_coefs(:,:) = 0.0 call edge_values_implicit_h4( GV%ke, hTmp, tmp, ppol_E, h_neglect=h_neglect_edge, & - answers_2018=CS%answers_2018 ) + answer_date=CS%answer_date ) call PPM_reconstruction( GV%ke, hTmp, tmp, ppol_E, ppol_coefs, h_neglect, & - answers_2018=CS%answers_2018 ) + answer_date=CS%answer_date ) if (bdry_extrap) & call PPM_boundary_extrapolation( GV%ke, hTmp, tmp, ppol_E, ppol_coefs, h_neglect ) @@ -1315,15 +1333,15 @@ subroutine TS_PPM_edge_values( CS, S_t, S_b, T_t, T_b, G, GV, tv, h, bdry_extrap ppol_E(:,:) = 0.0 ppol_coefs(:,:) = 0.0 tmp(:) = tv%T(i,j,:) - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then call edge_values_implicit_h4( GV%ke, hTmp, tmp, ppol_E, h_neglect=1.0e-10*GV%m_to_H, & - answers_2018=CS%answers_2018 ) + answer_date=CS%answer_date ) else call edge_values_implicit_h4( GV%ke, hTmp, tmp, ppol_E, h_neglect=GV%H_subroundoff, & - answers_2018=CS%answers_2018 ) + answer_date=CS%answer_date ) endif call PPM_reconstruction( GV%ke, hTmp, tmp, ppol_E, ppol_coefs, h_neglect, & - answers_2018=CS%answers_2018 ) + answer_date=CS%answer_date ) if (bdry_extrap) & call PPM_boundary_extrapolation(GV%ke, hTmp, tmp, ppol_E, ppol_coefs, h_neglect ) diff --git a/src/ALE/MOM_regridding.F90 b/src/ALE/MOM_regridding.F90 index f093efb8dc..2f28362fb1 100644 --- a/src/ALE/MOM_regridding.F90 +++ b/src/ALE/MOM_regridding.F90 @@ -117,9 +117,10 @@ module MOM_regridding !! If false, integrate from the bottom upward, as does the rest of the model. logical :: integrate_downward_for_e = .true. - !> If true, use the order of arithmetic and expressions that recover the remapping answers from 2018. - !! If false, use more robust forms of the same remapping expressions. - logical :: remap_answers_2018 = .true. + !> The vintage of the order of arithmetic and expressions to use for remapping. + !! Values below 20190101 recover the remapping answers from 2018. + !! Higher values use more robust forms of the same remapping expressions. + integer :: remap_answer_date = 20181231 !### Change to 99991231? logical :: use_hybgen_unmix = .false. !< If true, use the hybgen unmixing code before remapping @@ -204,7 +205,9 @@ subroutine initialize_regridding(CS, GV, US, max_depth, param_file, mdl, coord_m character(len=12) :: expected_units, alt_units ! Temporary strings logical :: tmpLogical, fix_haloclines, do_sum, main_parameters logical :: coord_is_state_dependent, ierr - logical :: default_2018_answers, remap_answers_2018 + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 real :: filt_len, strat_tol, tmpReal, P_Ref real :: maximum_depth ! The maximum depth of the ocean [m] (not in Z). 
real :: dz_fixed_sfc, Rho_avg_depth, nlay_sfc_int @@ -264,9 +267,12 @@ subroutine initialize_regridding(CS, GV, US, max_depth, param_file, mdl, coord_m trim(regriddingInterpSchemeDoc), default=trim(string2)) call set_regrid_params(CS, interp_scheme=string) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) + default=(default_answer_date<20190101)) call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& @@ -1381,7 +1387,7 @@ subroutine build_rho_grid( G, GV, US, h, tv, dzInterface, remapCS, CS, frac_shel #endif logical :: ice_shelf - if (.not.CS%remap_answers_2018) then + if (CS%remap_answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 @@ -1524,7 +1530,7 @@ subroutine build_grid_HyCOM1( G, GV, US, h, tv, h_new, dzInterface, CS, frac_she real :: z_top_col, totalThickness logical :: ice_shelf - if (.not.CS%remap_answers_2018) then + if (CS%remap_answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 @@ -1676,7 +1682,7 @@ subroutine build_grid_SLight(G, GV, US, h, tv, dzInterface, CS) integer :: i, j, k, nz real :: h_neglect, h_neglect_edge - if (.not.CS%remap_answers_2018) then + if (CS%remap_answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 @@ -2352,8 +2358,8 @@ end function getCoordinateShortName subroutine set_regrid_params( CS, boundary_extrapolation, min_thickness, old_grid_weight, & interp_scheme, depth_of_time_filter_shallow, depth_of_time_filter_deep, & compress_fraction, ref_pressure, dz_min_surface, nz_fixed_surface, Rho_ML_avg_depth, & - nlay_ML_to_interior, fix_haloclines, halocline_filt_len, & - halocline_strat_tol, integrate_downward_for_e, remap_answers_2018, & + nlay_ML_to_interior, fix_haloclines, halocline_filt_len, halocline_strat_tol, & + integrate_downward_for_e, remap_answers_2018, remap_answer_date, & adaptTimeRatio, adaptZoom, adaptZoomCoeff, adaptBuoyCoeff, adaptAlpha, adaptDoMin, adaptDrho0) type(regridding_CS), intent(inout) :: CS !< Regridding control structure logical, optional, intent(in) :: boundary_extrapolation !< Extrapolate in boundary cells @@ -2383,6 +2389,7 @@ subroutine set_regrid_params( CS, boundary_extrapolation, min_thickness, old_gri logical, optional, intent(in) :: remap_answers_2018 !< If true, use the order of arithmetic and expressions !! that recover the remapping answers from 2018. Otherwise !! use more robust but mathematically equivalent expressions. + integer, optional, intent(in) :: remap_answer_date !< The vintage of the expressions to use for remapping real, optional, intent(in) :: adaptTimeRatio !< Ratio of the ALE timestep to the grid timescale [nondim]. 
real, optional, intent(in) :: adaptZoom !< Depth of near-surface zooming region [H ~> m or kg m-2]. real, optional, intent(in) :: adaptZoomCoeff !< Coefficient of near-surface zooming diffusivity [nondim]. @@ -2413,7 +2420,14 @@ subroutine set_regrid_params( CS, boundary_extrapolation, min_thickness, old_gri if (present(compress_fraction)) CS%compressibility_fraction = compress_fraction if (present(ref_pressure)) CS%ref_pressure = ref_pressure if (present(integrate_downward_for_e)) CS%integrate_downward_for_e = integrate_downward_for_e - if (present(remap_answers_2018)) CS%remap_answers_2018 = remap_answers_2018 + if (present(remap_answers_2018)) then + if (remap_answers_2018) then + CS%remap_answer_date = 20181231 + else + CS%remap_answer_date = 20190101 + endif + endif + if (present(remap_answer_date)) CS%remap_answer_date = remap_answer_date select case (CS%regridding_scheme) case (REGRIDDING_ZSTAR) diff --git a/src/ALE/MOM_remapping.F90 b/src/ALE/MOM_remapping.F90 index 50e1085cf6..9979b5d39b 100644 --- a/src/ALE/MOM_remapping.F90 +++ b/src/ALE/MOM_remapping.F90 @@ -34,8 +34,9 @@ module MOM_remapping logical :: check_remapping = .false. !> If true, the intermediate values used in remapping are forced to be bounded. logical :: force_bounds_in_subcell = .false. - !> If true use older, less acccurate expressions. - logical :: answers_2018 = .true. + !> The vintage of the expressions to use for remapping. Values below 20190101 result + !! in the use of older, less accurate expressions. + integer :: answer_date = 20181231 !### Change to 99991231? end type ! The following routines are visible to the outside world @@ -93,7 +94,7 @@ module MOM_remapping !> Set parameters within remapping object subroutine remapping_set_param(CS, remapping_scheme, boundary_extrapolation, & - check_reconstruction, check_remapping, force_bounds_in_subcell, answers_2018) + check_reconstruction, check_remapping, force_bounds_in_subcell, answers_2018, answer_date) type(remapping_CS), intent(inout) :: CS !< Remapping control structure character(len=*), optional, intent(in) :: remapping_scheme !< Remapping scheme to use logical, optional, intent(in) :: boundary_extrapolation !< Indicate to extrapolate in boundary cells @@ -101,6 +102,7 @@ subroutine remapping_set_param(CS, remapping_scheme, boundary_extrapolation, & logical, optional, intent(in) :: check_remapping !< Indicate to check results of remapping logical, optional, intent(in) :: force_bounds_in_subcell !< Force subcells values to be bounded logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. 
+ integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use if (present(remapping_scheme)) then call setReconstructionType( remapping_scheme, CS ) @@ -118,8 +120,16 @@ subroutine remapping_set_param(CS, remapping_scheme, boundary_extrapolation, & CS%force_bounds_in_subcell = force_bounds_in_subcell endif if (present(answers_2018)) then - CS%answers_2018 = answers_2018 + if (answers_2018) then + CS%answer_date = 20181231 + else + CS%answer_date = 20190101 + endif endif + if (present(answer_date)) then + CS%answer_date = answer_date + endif + end subroutine remapping_set_param subroutine extract_member_remapping_CS(CS, remapping_scheme, degree, boundary_extrapolation, check_reconstruction, & @@ -424,46 +434,46 @@ subroutine build_reconstructions_1d( CS, n0, h0, u0, ppoly_r_coefs, & call PLM_boundary_extrapolation( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect ) iMethod = INTEGRATION_PLM case ( REMAPPING_PPM_H4 ) - call edge_values_explicit_h4( n0, h0, u0, ppoly_r_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call PPM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_explicit_h4( n0, h0, u0, ppoly_r_E, h_neglect_edge, answer_date=CS%answer_date ) + call PPM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect, answer_date=CS%answer_date ) if ( CS%boundary_extrapolation ) then call PPM_boundary_extrapolation( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect ) endif iMethod = INTEGRATION_PPM case ( REMAPPING_PPM_IH4 ) - call edge_values_implicit_h4( n0, h0, u0, ppoly_r_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call PPM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h4( n0, h0, u0, ppoly_r_E, h_neglect_edge, answer_date=CS%answer_date ) + call PPM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect, answer_date=CS%answer_date ) if ( CS%boundary_extrapolation ) then call PPM_boundary_extrapolation( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect ) endif iMethod = INTEGRATION_PPM case ( REMAPPING_PPM_HYBGEN ) call hybgen_PPM_coefs(u0, h0, ppoly_r_E, n0, 1, h_neglect) - call PPM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect, answers_2018=.false. ) + call PPM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect, answer_date=99991231 ) if ( CS%boundary_extrapolation ) & call PPM_boundary_extrapolation( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect ) iMethod = INTEGRATION_PPM case ( REMAPPING_WENO_HYBGEN ) call hybgen_weno_coefs(u0, h0, ppoly_r_E, n0, 1, h_neglect) - call PPM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect, answers_2018=.false. 
) + call PPM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect, answer_date=99991231 ) if ( CS%boundary_extrapolation ) & call PPM_boundary_extrapolation( n0, h0, u0, ppoly_r_E, ppoly_r_coefs, h_neglect ) iMethod = INTEGRATION_PPM case ( REMAPPING_PQM_IH4IH3 ) - call edge_values_implicit_h4( n0, h0, u0, ppoly_r_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call edge_slopes_implicit_h3( n0, h0, u0, ppoly_r_S, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h4( n0, h0, u0, ppoly_r_E, h_neglect_edge, answer_date=CS%answer_date ) + call edge_slopes_implicit_h3( n0, h0, u0, ppoly_r_S, h_neglect, answer_date=CS%answer_date ) call PQM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_S, ppoly_r_coefs, h_neglect, & - answers_2018=CS%answers_2018 ) + answer_date=CS%answer_date ) if ( CS%boundary_extrapolation ) then call PQM_boundary_extrapolation_v1( n0, h0, u0, ppoly_r_E, ppoly_r_S, & ppoly_r_coefs, h_neglect ) endif iMethod = INTEGRATION_PQM case ( REMAPPING_PQM_IH6IH5 ) - call edge_values_implicit_h6( n0, h0, u0, ppoly_r_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call edge_slopes_implicit_h5( n0, h0, u0, ppoly_r_S, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h6( n0, h0, u0, ppoly_r_E, h_neglect_edge, answer_date=CS%answer_date ) + call edge_slopes_implicit_h5( n0, h0, u0, ppoly_r_S, h_neglect, answer_date=CS%answer_date ) call PQM_reconstruction( n0, h0, u0, ppoly_r_E, ppoly_r_S, ppoly_r_coefs, h_neglect, & - answers_2018=CS%answers_2018 ) + answer_date=CS%answer_date ) if ( CS%boundary_extrapolation ) then call PQM_boundary_extrapolation_v1( n0, h0, u0, ppoly_r_E, ppoly_r_S, & ppoly_r_coefs, h_neglect ) @@ -1593,7 +1603,7 @@ end subroutine dzFromH1H2 !> Constructor for remapping control structure subroutine initialize_remapping( CS, remapping_scheme, boundary_extrapolation, & - check_reconstruction, check_remapping, force_bounds_in_subcell, answers_2018) + check_reconstruction, check_remapping, force_bounds_in_subcell, answers_2018, answer_date) ! Arguments type(remapping_CS), intent(inout) :: CS !< Remapping control structure character(len=*), intent(in) :: remapping_scheme !< Remapping scheme to use @@ -1602,11 +1612,12 @@ subroutine initialize_remapping( CS, remapping_scheme, boundary_extrapolation, & logical, optional, intent(in) :: check_remapping !< Indicate to check results of remapping logical, optional, intent(in) :: force_bounds_in_subcell !< Force subcells values to be bounded logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Note that remapping_scheme is mandatory for initialize_remapping() call remapping_set_param(CS, remapping_scheme=remapping_scheme, boundary_extrapolation=boundary_extrapolation, & check_reconstruction=check_reconstruction, check_remapping=check_remapping, & - force_bounds_in_subcell=force_bounds_in_subcell, answers_2018=answers_2018) + force_bounds_in_subcell=force_bounds_in_subcell, answers_2018=answers_2018, answer_date=answer_date) end subroutine initialize_remapping @@ -1681,15 +1692,15 @@ logical function remapping_unit_tests(verbose) data h2 /6*0.5/ ! 6 uniform layers with total depth of 3 type(remapping_CS) :: CS !< Remapping control structure real, allocatable, dimension(:,:) :: ppoly0_E, ppoly0_S, ppoly0_coefs - logical :: answers_2018 ! If true use older, less acccurate expressions. + integer :: answer_date ! 
The vintage of the expressions to test integer :: i real :: err, h_neglect, h_neglect_edge logical :: thisTest, v v = verbose - answers_2018 = .false. ! .true. + answer_date = 20190101 ! 20181231 h_neglect = hNeglect_dflt - h_neglect_edge = hNeglect_dflt ; if (answers_2018) h_neglect_edge = 1.0e-10 + h_neglect_edge = hNeglect_dflt ; if (answer_date < 20190101) h_neglect_edge = 1.0e-10 write(stdout,*) '==== MOM_remapping: remapping_unit_tests =================' remapping_unit_tests = .false. ! Normally return false @@ -1711,7 +1722,7 @@ logical function remapping_unit_tests(verbose) remapping_unit_tests = remapping_unit_tests .or. thisTest thisTest = .false. - call initialize_remapping(CS, 'PPM_H4', answers_2018=answers_2018) + call initialize_remapping(CS, 'PPM_H4', answer_date=answer_date) if (verbose) write(stdout,*) 'h0 (test data)' if (verbose) call dumpGrid(n0,h0,x0,u0) @@ -1735,8 +1746,8 @@ logical function remapping_unit_tests(verbose) ppoly0_S(:,:) = 0.0 ppoly0_coefs(:,:) = 0.0 - call edge_values_explicit_h4( n0, h0, u0, ppoly0_E, h_neglect=1e-10, answers_2018=answers_2018 ) - call PPM_reconstruction( n0, h0, u0, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=answers_2018 ) + call edge_values_explicit_h4( n0, h0, u0, ppoly0_E, h_neglect=1e-10, answer_date=answer_date ) + call PPM_reconstruction( n0, h0, u0, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=answer_date ) call PPM_boundary_extrapolation( n0, h0, u0, ppoly0_E, ppoly0_coefs, h_neglect ) u1(:) = 0. call remapByProjection( n0, h0, u0, ppoly0_E, ppoly0_coefs, & @@ -1866,7 +1877,7 @@ logical function remapping_unit_tests(verbose) test_answer(v, 3, ppoly0_coefs(:,2), (/0.,4.,0./), 'Non-uniform line PLM: P1') call edge_values_explicit_h4( 5, (/1.,1.,1.,1.,1./), (/1.,3.,5.,7.,9./), ppoly0_E, & - h_neglect=1e-10, answers_2018=answers_2018 ) + h_neglect=1e-10, answer_date=answer_date ) ! The next two tests currently fail due to roundoff, but pass when given a reasonable tolerance. thisTest = test_answer(v, 5, ppoly0_E(:,1), (/0.,2.,4.,6.,8./), 'Line H4: left edges', tol=8.0e-15) remapping_unit_tests = remapping_unit_tests .or. thisTest @@ -1875,7 +1886,7 @@ logical function remapping_unit_tests(verbose) ppoly0_E(:,1) = (/0.,2.,4.,6.,8./) ppoly0_E(:,2) = (/2.,4.,6.,8.,10./) call PPM_reconstruction(5, (/1.,1.,1.,1.,1./), (/1.,3.,5.,7.,9./), ppoly0_E(1:5,:), & - ppoly0_coefs(1:5,:), h_neglect, answers_2018=answers_2018 ) + ppoly0_coefs(1:5,:), h_neglect, answer_date=answer_date ) remapping_unit_tests = remapping_unit_tests .or. & test_answer(v, 5, ppoly0_coefs(:,1), (/1.,2.,4.,6.,9./), 'Line PPM: P0') remapping_unit_tests = remapping_unit_tests .or. & @@ -1884,7 +1895,7 @@ logical function remapping_unit_tests(verbose) test_answer(v, 5, ppoly0_coefs(:,3), (/0.,0.,0.,0.,0./), 'Line PPM: P2') call edge_values_explicit_h4( 5, (/1.,1.,1.,1.,1./), (/1.,1.,7.,19.,37./), ppoly0_E, & - h_neglect=1e-10, answers_2018=answers_2018 ) + h_neglect=1e-10, answer_date=answer_date ) ! The next two tests are now passing when answers_2018 = .false., but otherwise only work to roundoff. thisTest = test_answer(v, 5, ppoly0_E(:,1), (/3.,0.,3.,12.,27./), 'Parabola H4: left edges', tol=2.7e-14) remapping_unit_tests = remapping_unit_tests .or. 
thisTest @@ -1893,7 +1904,7 @@ logical function remapping_unit_tests(verbose) ppoly0_E(:,1) = (/0.,0.,3.,12.,27./) ppoly0_E(:,2) = (/0.,3.,12.,27.,48./) call PPM_reconstruction(5, (/1.,1.,1.,1.,1./), (/0.,1.,7.,19.,37./), ppoly0_E(1:5,:), & - ppoly0_coefs(1:5,:), h_neglect, answers_2018=answers_2018 ) + ppoly0_coefs(1:5,:), h_neglect, answer_date=answer_date ) remapping_unit_tests = remapping_unit_tests .or. & test_answer(v, 5, ppoly0_E(:,1), (/0.,0.,3.,12.,37./), 'Parabola PPM: left edges') remapping_unit_tests = remapping_unit_tests .or. & @@ -1908,7 +1919,7 @@ logical function remapping_unit_tests(verbose) ppoly0_E(:,1) = (/0.,0.,6.,10.,15./) ppoly0_E(:,2) = (/0.,6.,12.,17.,15./) call PPM_reconstruction(5, (/1.,1.,1.,1.,1./), (/0.,5.,7.,16.,15./), ppoly0_E(1:5,:), & - ppoly0_coefs(1:5,:), h_neglect, answers_2018=answers_2018 ) + ppoly0_coefs(1:5,:), h_neglect, answer_date=answer_date ) remapping_unit_tests = remapping_unit_tests .or. & test_answer(v, 5, ppoly0_E(:,1), (/0.,3.,6.,16.,15./), 'Limits PPM: left edges') remapping_unit_tests = remapping_unit_tests .or. & diff --git a/src/ALE/regrid_interp.F90 b/src/ALE/regrid_interp.F90 index 21773774f6..dbe364c969 100644 --- a/src/ALE/regrid_interp.F90 +++ b/src/ALE/regrid_interp.F90 @@ -31,12 +31,12 @@ module regrid_interp !! boundary cells logical :: boundary_extrapolation - !> If true use older, less acccurate expressions. - logical :: answers_2018 = .true. + !> The vintage of the expressions to use for remapping + integer :: answer_date = 20181231 !### Change to 99991231? + !### There is no point where the value of answer_date is reset. end type interp_CS_type -public regridding_set_ppolys, interpolate_grid -public build_and_interpolate_grid +public regridding_set_ppolys, build_and_interpolate_grid public set_interp_scheme, set_interp_extrap ! 
List of interpolation schemes @@ -107,7 +107,7 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_P1M_H2 ) degree = DEGREE_1 call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) - call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -115,11 +115,11 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_P1M_H4 ) degree = DEGREE_1 if ( n0 >= 4 ) then - call edge_values_explicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answers_2018=CS%answers_2018 ) + call edge_values_explicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answer_date=CS%answer_date ) else call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) endif - call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -127,11 +127,11 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_P1M_IH4 ) degree = DEGREE_1 if ( n0 >= 4 ) then - call edge_values_implicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answer_date=CS%answer_date ) else call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) endif - call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -146,8 +146,8 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_PPM_H4 ) if ( n0 >= 4 ) then degree = DEGREE_2 - call edge_values_explicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call PPM_reconstruction( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_explicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answer_date=CS%answer_date ) + call PPM_reconstruction( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call PPM_boundary_extrapolation( n0, h0, densities, ppoly0_E, & ppoly0_coefs, h_neglect ) @@ -155,7 +155,7 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & else degree = DEGREE_1 call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) - call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -164,8 +164,8 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_PPM_IH4 ) if ( n0 >= 4 ) then degree = DEGREE_2 - call edge_values_implicit_h4( n0, h0, 
densities, ppoly0_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call PPM_reconstruction( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answer_date=CS%answer_date ) + call PPM_reconstruction( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call PPM_boundary_extrapolation( n0, h0, densities, ppoly0_E, & ppoly0_coefs, h_neglect ) @@ -173,7 +173,7 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & else degree = DEGREE_1 call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) - call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -182,10 +182,10 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_P3M_IH4IH3 ) if ( n0 >= 4 ) then degree = DEGREE_3 - call edge_values_implicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call edge_slopes_implicit_h3( n0, h0, densities, ppoly0_S, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answer_date=CS%answer_date ) + call edge_slopes_implicit_h3( n0, h0, densities, ppoly0_S, h_neglect, answer_date=CS%answer_date ) call P3M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_S, & - ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P3M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_S, & ppoly0_coefs, h_neglect, h_neglect_edge ) @@ -193,7 +193,7 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & else degree = DEGREE_1 call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) - call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -202,10 +202,10 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_P3M_IH6IH5 ) if ( n0 >= 6 ) then degree = DEGREE_3 - call edge_values_implicit_h6( n0, h0, densities, ppoly0_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call edge_slopes_implicit_h5( n0, h0, densities, ppoly0_S, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h6( n0, h0, densities, ppoly0_E, h_neglect_edge, answer_date=CS%answer_date ) + call edge_slopes_implicit_h5( n0, h0, densities, ppoly0_S, h_neglect, answer_date=CS%answer_date ) call P3M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_S, & - ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P3M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_S, & ppoly0_coefs, h_neglect, h_neglect_edge ) @@ -213,7 +213,7 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & else degree = DEGREE_1 call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) - call P1M_interpolation( n0, h0, 
densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -222,10 +222,10 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_PQM_IH4IH3 ) if ( n0 >= 4 ) then degree = DEGREE_4 - call edge_values_implicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call edge_slopes_implicit_h3( n0, h0, densities, ppoly0_S, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h4( n0, h0, densities, ppoly0_E, h_neglect_edge, answer_date=CS%answer_date ) + call edge_slopes_implicit_h3( n0, h0, densities, ppoly0_S, h_neglect, answer_date=CS%answer_date ) call PQM_reconstruction( n0, h0, densities, ppoly0_E, ppoly0_S, & - ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call PQM_boundary_extrapolation_v1( n0, h0, densities, ppoly0_E, ppoly0_S, & ppoly0_coefs, h_neglect ) @@ -233,7 +233,7 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & else degree = DEGREE_1 call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) - call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -242,10 +242,10 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & case ( INTERPOLATION_PQM_IH6IH5 ) if ( n0 >= 6 ) then degree = DEGREE_4 - call edge_values_implicit_h6( n0, h0, densities, ppoly0_E, h_neglect_edge, answers_2018=CS%answers_2018 ) - call edge_slopes_implicit_h5( n0, h0, densities, ppoly0_S, h_neglect, answers_2018=CS%answers_2018 ) + call edge_values_implicit_h6( n0, h0, densities, ppoly0_E, h_neglect_edge, answer_date=CS%answer_date ) + call edge_slopes_implicit_h5( n0, h0, densities, ppoly0_S, h_neglect, answer_date=CS%answer_date ) call PQM_reconstruction( n0, h0, densities, ppoly0_E, ppoly0_S, & - ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call PQM_boundary_extrapolation_v1( n0, h0, densities, ppoly0_E, ppoly0_S, & ppoly0_coefs, h_neglect ) @@ -253,7 +253,7 @@ subroutine regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, & else degree = DEGREE_1 call edge_values_explicit_h2( n0, h0, densities, ppoly0_E ) - call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answers_2018=CS%answers_2018 ) + call P1M_interpolation( n0, h0, densities, ppoly0_E, ppoly0_coefs, h_neglect, answer_date=CS%answer_date ) if (extrapolate) then call P1M_boundary_extrapolation( n0, h0, densities, ppoly0_E, ppoly0_coefs ) endif @@ -268,7 +268,7 @@ end subroutine regridding_set_ppolys !! 'ppoly0' (possibly discontinuous), the coordinates of the new grid 'grid1' !! are determined by finding the corresponding target interface densities. 
subroutine interpolate_grid( n0, h0, x0, ppoly0_E, ppoly0_coefs, & - target_values, degree, n1, h1, x1, answers_2018 ) + target_values, degree, n1, h1, x1, answer_date ) integer, intent(in) :: n0 !< Number of points on source grid integer, intent(in) :: n1 !< Number of points on target grid real, dimension(n0), intent(in) :: h0 !< Thicknesses of source grid cells [H] @@ -280,7 +280,7 @@ subroutine interpolate_grid( n0, h0, x0, ppoly0_E, ppoly0_coefs, & integer, intent(in) :: degree !< Degree of interpolating polynomials real, dimension(n1), intent(inout) :: h1 !< Thicknesses of target grid cells [H] real, dimension(n1+1), intent(inout) :: x1 !< Target interface positions [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables integer :: k ! loop index @@ -295,7 +295,7 @@ subroutine interpolate_grid( n0, h0, x0, ppoly0_E, ppoly0_coefs, & do k = 2,n1 t = target_values(k) x1(k) = get_polynomial_coordinate ( n0, h0, x0, ppoly0_E, ppoly0_coefs, t, degree, & - answers_2018=answers_2018 ) + answer_date=answer_date ) h1(k-1) = x1(k) - x1(k-1) enddo h1(n1) = x1(n1+1) - x1(n1) @@ -329,7 +329,7 @@ subroutine build_and_interpolate_grid(CS, densities, n0, h0, x0, target_values, call regridding_set_ppolys(CS, densities, n0, h0, ppoly0_E, ppoly0_S, ppoly0_C, & degree, h_neglect, h_neglect_edge) call interpolate_grid(n0, h0, x0, ppoly0_E, ppoly0_C, target_values, degree, & - n1, h1, x1, answers_2018=CS%answers_2018) + n1, h1, x1, answer_date=CS%answer_date) end subroutine build_and_interpolate_grid !> Given a target value, find corresponding coordinate for given polynomial @@ -349,7 +349,7 @@ end subroutine build_and_interpolate_grid !! It is assumed that the number of cells defining 'grid' and 'ppoly' are the !! same. function get_polynomial_coordinate( N, h, x_g, edge_values, ppoly_coefs, & - target_value, degree, answers_2018 ) result ( x_tgt ) + target_value, degree, answer_date ) result ( x_tgt ) ! Arguments integer, intent(in) :: N !< Number of grid cells real, dimension(N), intent(in) :: h !< Grid cell thicknesses [H] @@ -358,7 +358,7 @@ function get_polynomial_coordinate( N, h, x_g, edge_values, ppoly_coefs, & real, dimension(N,DEGREE_MAX+1), intent(in) :: ppoly_coefs !< Coefficients of interpolating polynomials [A] real, intent(in) :: target_value !< Target value to find position for [A] integer, intent(in) :: degree !< Degree of the interpolating polynomials - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. + integer, intent(in) :: answer_date !< The vintage of the expressions to use real :: x_tgt !< The position of x_g at which target_value is found [H] ! Local variables @@ -373,11 +373,11 @@ function get_polynomial_coordinate( N, h, x_g, edge_values, ppoly_coefs, & integer :: i, k, iter ! loop indices integer :: k_found ! index of target cell character(len=320) :: mesg - logical :: use_2018_answers ! If true use older, less acccurate expressions. + logical :: use_2018_answers ! If true use older, less accurate expressions. eps = NR_OFFSET k_found = -1 - use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 + use_2018_answers = (answer_date < 20190101) ! If the target value is outside the range of all values, we ! 
force the target coordinate to be equal to the lowest or From 6b9cddf2ad704690d2afe2d072d75fabf1e63d57 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Fri, 29 Jul 2022 17:05:16 -0400 Subject: [PATCH 21/40] +Use answer_date to specify remapping Replace answers_2018 arguments with answer_date arguments to specify the version of expressions used in a number of vertical or horizontal regridding calls. In some cases, this also involves replacing one of the elements in an opaque type. It can also involve reading (but not yet logging) the new runtime parameter DEFAULT_ANSWER_DATE, but if it not set the results are unchanged from before. There is also a new optional argument, remap_answer_date, to wave_speed_init and wave_speed_set_param. Some comments were also added to describe real variables in VarMix_init. All answers are bitwise identical. --- src/core/MOM_open_boundary.F90 | 29 +++++--- src/diagnostics/MOM_diagnostics.F90 | 21 +++++- src/diagnostics/MOM_wave_speed.F90 | 35 +++++++--- src/framework/MOM_diag_mediator.F90 | 24 +++++-- src/framework/MOM_diag_remap.F90 | 25 +++---- .../MOM_state_initialization.F90 | 64 +++++++++++++----- .../MOM_tracer_initialization_from_Z.F90 | 34 ++++++++-- src/ocean_data_assim/MOM_oda_incupd.F90 | 3 +- .../lateral/MOM_lateral_mixing_coeffs.F90 | 33 +++++++--- .../vertical/MOM_ALE_sponge.F90 | 66 +++++++++++++------ .../vertical/MOM_tidal_mixing.F90 | 28 ++++++-- src/tracer/MOM_lateral_boundary_diffusion.F90 | 3 +- src/tracer/MOM_neutral_diffusion.F90 | 31 ++++++--- 13 files changed, 293 insertions(+), 103 deletions(-) diff --git a/src/core/MOM_open_boundary.F90 b/src/core/MOM_open_boundary.F90 index 63b9434269..5a011c9101 100644 --- a/src/core/MOM_open_boundary.F90 +++ b/src/core/MOM_open_boundary.F90 @@ -321,9 +321,10 @@ module MOM_open_boundary real :: ramp_value !< If ramp is True, where we are on the ramp from !! zero to one [nondim]. type(time_type) :: ramp_start_time !< Time when model was started. - logical :: answers_2018 !< If true, use the order of arithmetic and expressions for remapping - !! that recover the answers from the end of 2018. Otherwise, use more - !! robust and accurate forms of mathematically equivalent expressions. + integer :: remap_answer_date !< The vintage of the order of arithmetic and expressions to use + !! for remapping. Values below 20190101 recover the remapping + !! answers from 2018, while higher values use more robust + !! forms of the same remapping expressions. end type ocean_OBC_type !> Control structure for open boundaries that read from files. @@ -371,7 +372,11 @@ subroutine open_boundary_config(G, US, param_file, OBC) character(len=1024) :: segment_str ! The contents (rhs) for parameter "segment_param_str" character(len=200) :: config1 ! String for OBC_USER_CONFIG real :: Lscale_in, Lscale_out ! parameters controlling tracer values at the boundaries [L ~> m] - logical :: answers_2018, default_2018_answers + logical :: answers_2018 ! If true, use the order of arithmetic and expressions for remapping + ! that recover the answers from the end of 2018. Otherwise, use more + ! robust and accurate forms of mathematically equivalent expressions. + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. logical :: check_reconstruction, check_remapping, force_bounds_in_subcell character(len=64) :: remappingScheme ! This include declares and sets the variable "version". 
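Editor's note: the commit message above describes a recurring translation from the legacy 2018-answers logical flags to integer "answer date" vintages, with 20181231 standing in for the end-of-2018 expressions and dates of 20190101 or later selecting the updated, more robust forms. The minimal sketch below restates that translation outside of any MOM6 module. The program wrapper, the hard-coded flag value, and the write statements are illustrative assumptions and are not part of the patch; only the threshold dates follow the code in this series.

! Illustrative sketch only: how a legacy 2018-answers flag maps onto the
! integer answer_date convention introduced by this patch series.
program answer_date_sketch
  implicit none
  logical :: answers_2018   ! Legacy flag, e.g. as read from REMAPPING_2018_ANSWERS
  integer :: answer_date    ! The vintage of the expressions to use

  answers_2018 = .false.    ! Assumed value, for illustration only
  if (answers_2018) then
    answer_date = 20181231  ! Recover the answers from the end of 2018
  else
    answer_date = 20190101  ! Use the more robust forms of the same expressions
  endif

  ! Downstream code selects expressions by comparing against the threshold date.
  if (answer_date >= 20190101) then
    write(*,*) "Updated expressions selected, answer_date = ", answer_date
  else
    write(*,*) "2018 answers recovered, answer_date = ", answer_date
  endif
end program answer_date_sketch

Encoding the vintage as a date, with a far-future value such as 99991231 as the eventual default, appears intended to let later answer-changing updates reuse the same parameter rather than adding a new logical flag for every change.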
@@ -618,18 +623,26 @@ subroutine open_boundary_config(G, US, param_file, OBC) "If true, the values on the intermediate grid used for remapping "//& "are forced to be bounded, which might not be the case due to "//& "round off.", default=.false.,do_not_log=.true.) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", OBC%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + if (answers_2018) then + OBC%remap_answer_date = 20181231 + else + OBC%remap_answer_date = 20190101 + endif allocate(OBC%remap_CS) call initialize_remapping(OBC%remap_CS, remappingScheme, boundary_extrapolation = .false., & check_reconstruction=check_reconstruction, check_remapping=check_remapping, & - force_bounds_in_subcell=force_bounds_in_subcell, answers_2018=OBC%answers_2018) + force_bounds_in_subcell=force_bounds_in_subcell, answer_date=OBC%remap_answer_date) endif ! OBC%number_of_segments > 0 @@ -3718,7 +3731,7 @@ subroutine update_OBC_segment_data(G, GV, US, OBC, tv, h, Time) if (OBC%add_tide_constituents) time_delta = US%s_to_T * time_type_to_real(Time - OBC%time_ref) - if (.not. OBC%answers_2018) then + if (OBC%remap_answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H * 1.0e-30 ; h_neglect_edge = GV%m_to_H * 1.0e-10 diff --git a/src/diagnostics/MOM_diagnostics.F90 b/src/diagnostics/MOM_diagnostics.F90 index e1c4f19083..521d55115c 100644 --- a/src/diagnostics/MOM_diagnostics.F90 +++ b/src/diagnostics/MOM_diagnostics.F90 @@ -1556,7 +1556,13 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag character(len=40) :: mdl = "MOM_diagnostics" ! This module's name. character(len=48) :: thickness_units, flux_units logical :: use_temperature, adiabatic - logical :: default_2018_answers, remap_answers_2018 + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + integer :: remap_answer_date ! The vintage of the order of arithmetic and expressions to use + ! for remapping. Values below 20190101 recover the remapping + ! answers from 2018, while higher values use more robust + ! forms of the same remapping expressions. + logical :: remap_answers_2018 CS%initialized = .true. @@ -1584,13 +1590,22 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag call get_param(param_file, mdl, "INTERNAL_WAVE_SPEED_BETTER_EST", better_speed_est, & "If true, use a more robust estimate of the first mode wave speed as the "//& "starting point for iterations.", default=.true.) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) 
call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) + default=(default_answer_date<20190101)) call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + if (remap_answers_2018) then + remap_answer_date = 20181231 + else + remap_answer_date = 20190101 + endif + call get_param(param_file, mdl, "SPLIT", split, default=.true., do_not_log=.true.) thickness_units = get_thickness_units(GV) @@ -1816,7 +1831,7 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag if ((CS%id_cg1>0) .or. (CS%id_Rd1>0) .or. (CS%id_cfl_cg1>0) .or. & (CS%id_cfl_cg1_x>0) .or. (CS%id_cfl_cg1_y>0) .or. & (CS%id_cg_ebt>0) .or. (CS%id_Rd_ebt>0) .or. (CS%id_p_ebt>0)) then - call wave_speed_init(CS%wave_speed, remap_answers_2018=remap_answers_2018, & + call wave_speed_init(CS%wave_speed, remap_answer_date=remap_answer_date, & better_speed_est=better_speed_est, min_speed=wave_speed_min, & wave_speed_tol=wave_speed_tol) endif diff --git a/src/diagnostics/MOM_wave_speed.F90 b/src/diagnostics/MOM_wave_speed.F90 index 36a6d51e83..85f27d4249 100644 --- a/src/diagnostics/MOM_wave_speed.F90 +++ b/src/diagnostics/MOM_wave_speed.F90 @@ -46,9 +46,11 @@ module MOM_wave_speed !! speeds [nondim] type(remapping_CS) :: remapping_CS !< Used for vertical remapping when calculating equivalent barotropic !! mode structure. - logical :: remap_answers_2018 = .true. !< If true, use the order of arithmetic and expressions that - !! recover the remapping answers from 2018. If false, use more - !! robust forms of the same remapping expressions. + integer :: remap_answer_date = 20181231 !< The vintage of the order of arithmetic and expressions to use + !! for remapping. Values below 20190101 recover the remapping + !! answers from 2018, while higher values use more robust + !! forms of the same remapping expressions. + !### Change to 99991231? type(diag_ctrl), pointer :: diag !< Diagnostics control structure end type wave_speed_CS @@ -558,7 +560,7 @@ subroutine wave_speed(h, tv, G, GV, US, cg1, CS, full_halos, use_ebt_mode, mono_ do k = 1,kc Hc_H(k) = GV%Z_to_H * Hc(k) enddo - if (CS%remap_answers_2018) then + if (CS%remap_answer_date < 20190101) then call remapping_core_h(CS%remapping_CS, kc, Hc_H(:), mode_struct, & nz, h(i,j,:), modal_structure(i,j,:), & 1.0e-30*GV%m_to_H, 1.0e-10*GV%m_to_H) @@ -1168,7 +1170,7 @@ end subroutine tridiag_det !> Initialize control structure for MOM_wave_speed subroutine wave_speed_init(CS, use_ebt_mode, mono_N2_column_fraction, mono_N2_depth, remap_answers_2018, & - better_speed_est, min_speed, wave_speed_tol) + remap_answer_date, better_speed_est, min_speed, wave_speed_tol) type(wave_speed_CS), intent(inout) :: CS !< Wave speed control struct logical, optional, intent(in) :: use_ebt_mode !< If true, use the equivalent !! barotropic mode instead of the first baroclinic mode. @@ -1181,6 +1183,10 @@ subroutine wave_speed_init(CS, use_ebt_mode, mono_N2_column_fraction, mono_N2_de logical, optional, intent(in) :: remap_answers_2018 !< If true, use the order of arithmetic and expressions !! that recover the remapping answers from 2018. Otherwise !! use more robust but mathematically equivalent expressions. 
+ integer, optional, intent(in) :: remap_answer_date !< The vintage of the order of arithmetic and expressions + !! to use for remapping. Values below 20190101 recover the remapping + !! answers from 2018, while higher values use more robust + !! forms of the same remapping expressions. logical, optional, intent(in) :: better_speed_est !< If true, use a more robust estimate of the first !! mode speed as the starting point for iterations. real, optional, intent(in) :: min_speed !< If present, set a floor in the first mode speed @@ -1199,15 +1205,17 @@ subroutine wave_speed_init(CS, use_ebt_mode, mono_N2_column_fraction, mono_N2_de call wave_speed_set_param(CS, use_ebt_mode=use_ebt_mode, mono_N2_column_fraction=mono_N2_column_fraction, & better_speed_est=better_speed_est, min_speed=min_speed, wave_speed_tol=wave_speed_tol) + !### Uncomment this? remap_answers_2018=remap_answers_2018, remap_answer_date=remap_answer_date) + !### The remap_answers_2018 argument is irrelevant, because remapping is hard-coded to use PLM. call initialize_remapping(CS%remapping_CS, 'PLM', boundary_extrapolation=.false., & - answers_2018=CS%remap_answers_2018) + answer_date=CS%remap_answer_date) end subroutine wave_speed_init !> Sets internal parameters for MOM_wave_speed subroutine wave_speed_set_param(CS, use_ebt_mode, mono_N2_column_fraction, mono_N2_depth, remap_answers_2018, & - better_speed_est, min_speed, wave_speed_tol) + remap_answer_date, better_speed_est, min_speed, wave_speed_tol) type(wave_speed_CS), intent(inout) :: CS !< Control structure for MOM_wave_speed logical, optional, intent(in) :: use_ebt_mode !< If true, use the equivalent @@ -1221,6 +1229,10 @@ subroutine wave_speed_set_param(CS, use_ebt_mode, mono_N2_column_fraction, mono_ logical, optional, intent(in) :: remap_answers_2018 !< If true, use the order of arithmetic and expressions !! that recover the remapping answers from 2018. Otherwise !! use more robust but mathematically equivalent expressions. + integer, optional, intent(in) :: remap_answer_date !< The vintage of the order of arithmetic and expressions + !! to use for remapping. Values below 20190101 recover the remapping + !! answers from 2018, while higher values use more robust + !! forms of the same remapping expressions. logical, optional, intent(in) :: better_speed_est !< If true, use a more robust estimate of the first !! mode speed as the starting point for iterations. 
real, optional, intent(in) :: min_speed !< If present, set a floor in the first mode speed @@ -1231,7 +1243,14 @@ subroutine wave_speed_set_param(CS, use_ebt_mode, mono_N2_column_fraction, mono_ if (present(use_ebt_mode)) CS%use_ebt_mode = use_ebt_mode if (present(mono_N2_column_fraction)) CS%mono_N2_column_fraction = mono_N2_column_fraction if (present(mono_N2_depth)) CS%mono_N2_depth = mono_N2_depth - if (present(remap_answers_2018)) CS%remap_answers_2018 = remap_answers_2018 + if (present(remap_answers_2018)) then + if (remap_answers_2018) then + CS%remap_answer_date = 20181231 + else + CS%remap_answer_date = 20190101 + endif + endif + if (present(remap_answer_date)) CS%remap_answer_date = remap_answer_date if (present(better_speed_est)) CS%better_cg1_est = better_speed_est if (present(min_speed)) CS%min_speed2 = min_speed**2 if (present(wave_speed_tol)) CS%wave_speed_tol = wave_speed_tol diff --git a/src/framework/MOM_diag_mediator.F90 b/src/framework/MOM_diag_mediator.F90 index 677c268ab3..65725ca59c 100644 --- a/src/framework/MOM_diag_mediator.F90 +++ b/src/framework/MOM_diag_mediator.F90 @@ -3144,7 +3144,15 @@ subroutine diag_mediator_init(G, GV, US, nz, param_file, diag_cs, doc_file_dir) ! Local variables integer :: ios, i, new_unit logical :: opened, new_file - logical :: answers_2018, default_2018_answers + logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that + ! recover the remapping answers from 2018. If false, use more + ! robust forms of the same remapping expressions. + integer :: remap_answer_date ! The vintage of the order of arithmetic and expressions to use + ! for remapping. Values below 20190101 recover the remapping + ! answers from 2018, while higher values use more robust + ! forms of the same remapping expressions. + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. character(len=8) :: this_pe character(len=240) :: doc_file, doc_file_dflt, doc_path character(len=240), allocatable :: diag_coords(:) @@ -3171,13 +3179,21 @@ subroutine diag_mediator_init(G, GV, US, nz, param_file, diag_cs, doc_file_dir) 'The number of diagnostic vertical coordinates to use. '//& 'For each coordinate, an entry in DIAG_COORDS must be provided.', & default=1) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + if (remap_answers_2018) then + remap_answer_date = 20181231 + else + remap_answer_date = 20190101 + endif call get_param(param_file, mdl, 'USE_GRID_SPACE_DIAGNOSTIC_AXES', diag_cs%grid_space_axes, & 'If true, use a grid index coordinate convention for diagnostic axes. ',& default=.false.) 
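Editor's note: open_boundary_config, MOM_diagnostics_init and diag_mediator_init above all read the same chain of runtime parameters before converting the result to an answer date, so it may help to see the defaulting logic in one place. The sketch below paraphrases the get_param defaults as plain assignments; the variable names mirror the patch, but the program wrapper and the stand-in assignments are illustrative assumptions rather than MOM6 code.

! Illustrative sketch only: the defaulting chain from DEFAULT_ANSWER_DATE through
! DEFAULT_2018_ANSWERS and REMAPPING_2018_ANSWERS to the remap answer date.
program answer_date_defaults_sketch
  implicit none
  integer :: default_answer_date   ! DEFAULT_ANSWER_DATE (read but not yet logged)
  logical :: default_2018_answers  ! DEFAULT_2018_ANSWERS
  logical :: remap_answers_2018    ! REMAPPING_2018_ANSWERS
  integer :: remap_answer_date     ! Vintage handed to the remapping code

  ! Each value stands in for a get_param default; a user setting in the input
  ! file would override the corresponding line.
  default_answer_date = 99991231                            ! documented default
  default_2018_answers = (default_answer_date < 20190101)   ! default follows the date
  remap_answers_2018 = default_2018_answers                 ! default follows the flag

  ! Same one-line conversion used elsewhere in this patch series.
  remap_answer_date = 20190101 ; if (remap_answers_2018) remap_answer_date = 20181231

  write(*,*) "remap_answer_date = ", remap_answer_date
end program answer_date_defaults_sketch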
@@ -3200,7 +3216,7 @@ subroutine diag_mediator_init(G, GV, US, nz, param_file, diag_cs, doc_file_dir) allocate(diag_cs%diag_remap_cs(diag_cs%num_diag_coords)) ! Initialize each diagnostic vertical coordinate do i=1, diag_cs%num_diag_coords - call diag_remap_init(diag_cs%diag_remap_cs(i), diag_coords(i), answers_2018=answers_2018) + call diag_remap_init(diag_cs%diag_remap_cs(i), diag_coords(i), answer_date=remap_answer_date) enddo deallocate(diag_coords) endif diff --git a/src/framework/MOM_diag_remap.F90 b/src/framework/MOM_diag_remap.F90 index 2f179a3825..1bdf13b41f 100644 --- a/src/framework/MOM_diag_remap.F90 +++ b/src/framework/MOM_diag_remap.F90 @@ -115,21 +115,24 @@ module MOM_diag_remap !! variables [H ~> m or kg m-2] integer :: interface_axes_id = 0 !< Vertical axes id for remapping at interfaces integer :: layer_axes_id = 0 !< Vertical axes id for remapping on layers - logical :: answers_2018 !< If true, use the order of arithmetic and expressions for remapping - !! that recover the answers from the end of 2018. Otherwise, use - !! updated more robust forms of the same expressions. + integer :: answer_date !< The vintage of the order of arithmetic and expressions + !! to use for remapping. Values below 20190101 recover + !! the answers from 2018, while higher values use more + !! robust forms of the same remapping expressions. + end type diag_remap_ctrl contains !> Initialize a diagnostic remapping type with the given vertical coordinate. -subroutine diag_remap_init(remap_cs, coord_tuple, answers_2018) +subroutine diag_remap_init(remap_cs, coord_tuple, answer_date) type(diag_remap_ctrl), intent(inout) :: remap_cs !< Diag remapping control structure character(len=*), intent(in) :: coord_tuple !< A string in form of !! MODULE_SUFFIX PARAMETER_SUFFIX COORDINATE_NAME - logical, intent(in) :: answers_2018 !< If true, use the order of arithmetic and expressions - !! for remapping that recover the answers from the end of 2018. - !! Otherwise, use more robust forms of the same expressions. + integer, intent(in) :: answer_date !< The vintage of the order of arithmetic and expressions + !! to use for remapping. Values below 20190101 recover + !! the answers from 2018, while higher values use more + !! robust forms of the same remapping expressions. remap_cs%diag_module_suffix = trim(extractWord(coord_tuple, 1)) remap_cs%diag_coord_name = trim(extractWord(coord_tuple, 2)) @@ -138,7 +141,7 @@ subroutine diag_remap_init(remap_cs, coord_tuple, answers_2018) remap_cs%configured = .false. remap_cs%initialized = .false. remap_cs%used = .false. - remap_cs%answers_2018 = answers_2018 + remap_cs%answer_date = answer_date remap_cs%nz = 0 end subroutine diag_remap_init @@ -289,7 +292,7 @@ subroutine diag_remap_update(remap_cs, G, GV, US, h, T, S, eqn_of_state, h_targe return endif - if (.not.remap_cs%answers_2018) then + if (remap_cs%answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 @@ -301,7 +304,7 @@ subroutine diag_remap_update(remap_cs, G, GV, US, h, T, S, eqn_of_state, h_targe if (.not. remap_cs%initialized) then ! Initialize remapping and regridding on the first call call initialize_remapping(remap_cs%remap_cs, 'PPM_IH4', boundary_extrapolation=.false., & - answers_2018=remap_cs%answers_2018) + answer_date=remap_cs%answer_date) remap_cs%initialized = .true. 
endif @@ -367,7 +370,7 @@ subroutine diag_remap_do_remap(remap_cs, G, GV, h, staggered_in_x, staggered_in_ call assert(size(field, 3) == size(h, 3), & 'diag_remap_do_remap: Remap field and thickness z-axes do not match.') - if (.not.remap_cs%answers_2018) then + if (remap_cs%answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 diff --git a/src/initialization/MOM_state_initialization.F90 b/src/initialization/MOM_state_initialization.F90 index d21c13a3e5..f6d39497a8 100644 --- a/src/initialization/MOM_state_initialization.F90 +++ b/src/initialization/MOM_state_initialization.F90 @@ -1167,7 +1167,15 @@ subroutine trim_for_ice(PF, G, GV, US, ALE_CSp, tv, h, just_read) real :: scale_factor ! A file-dependent scaling factor for the input pressure. real :: min_thickness ! The minimum layer thickness, recast into Z units [Z ~> m]. integer :: i, j, k - logical :: default_2018_answers, remap_answers_2018 + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that + ! recover the remapping answers from 2018. If false, use more + ! robust forms of the same remapping expressions. + integer :: remap_answer_date ! The vintage of the order of arithmetic and expressions to use + ! for remapping. Values below 20190101 recover the remapping + ! answers from 2018, while higher values use more robust + ! forms of the same remapping expressions. logical :: use_remapping ! If true, remap the initial conditions. type(remapping_CS), pointer :: remap_CS => NULL() @@ -1192,14 +1200,18 @@ subroutine trim_for_ice(PF, G, GV, US, ALE_CSp, tv, h, just_read) default=.false., do_not_log=just_read) remap_answers_2018 = .true. if (use_remapping) then + call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) + default=(default_answer_date<20190101)) call get_param(PF, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) endif + remap_answer_date = 20190101 ; if (remap_answers_2018) remap_answer_date = 20181231 if (just_read) return ! All run-time parameters have been read, so return. @@ -1226,7 +1238,7 @@ subroutine trim_for_ice(PF, G, GV, US, ALE_CSp, tv, h, just_read) call cut_off_column_top(GV%ke, tv, GV, US, GV%g_Earth, G%bathyT(i,j)+G%Z_ref, & min_thickness, tv%T(i,j,:), T_t(i,j,:), T_b(i,j,:), & tv%S(i,j,:), S_t(i,j,:), S_b(i,j,:), p_surf(i,j), h(i,j,:), remap_CS, & - z_tol=1.0e-5*US%m_to_Z, remap_answers_2018=remap_answers_2018) + z_tol=1.0e-5*US%m_to_Z, remap_answer_date=remap_answer_date) enddo ; enddo end subroutine trim_for_ice @@ -1317,7 +1329,7 @@ end subroutine calc_sfc_displacement !> Adjust the layer thicknesses by removing the top of the water column above the !! 
depth where the hydrostatic pressure matches p_surf subroutine cut_off_column_top(nk, tv, GV, US, G_earth, depth, min_thickness, T, T_t, T_b, & - S, S_t, S_b, p_surf, h, remap_CS, z_tol, remap_answers_2018) + S, S_t, S_b, p_surf, h, remap_CS, z_tol, remap_answer_date) integer, intent(in) :: nk !< Number of layers type(thermo_var_ptrs), intent(in) :: tv !< Thermodynamics structure type(verticalGrid_type), intent(in) :: GV !< The ocean's vertical grid structure. @@ -1337,10 +1349,10 @@ subroutine cut_off_column_top(nk, tv, GV, US, G_earth, depth, min_thickness, T, !! if associated real, optional, intent(in) :: z_tol !< The tolerance with which to find the depth !! matching the specified pressure [Z ~> m]. - logical, optional, intent(in) :: remap_answers_2018 !< If true, use the order of arithmetic - !! and expressions that recover the answers for remapping - !! from the end of 2018. Otherwise, use more robust - !! forms of the same expressions. + integer, optional, intent(in) :: remap_answer_date !< The vintage of the order of arithmetic and + !! expressions to use for remapping. Values below 20190101 + !! recover the remapping answers from 2018, while higher + !! values use more robust forms of the same remapping expressions. ! Local variables real, dimension(nk+1) :: e ! Top and bottom edge values for reconstructions [Z ~> m] @@ -1350,7 +1362,7 @@ subroutine cut_off_column_top(nk, tv, GV, US, G_earth, depth, min_thickness, T, logical :: answers_2018 integer :: k - answers_2018 = .true. ; if (present(remap_answers_2018)) answers_2018 = remap_answers_2018 + answers_2018 = .true. ; if (present(remap_answer_date)) answers_2018 = (remap_answer_date < 20190101) ! Calculate original interface positions e(nk+1) = -depth @@ -2458,7 +2470,20 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just type(remapping_CS) :: remapCS ! Remapping parameters and work arrays logical :: homogenize, useALEremapping, remap_full_column, remap_general, remap_old_alg - logical :: answers_2018, default_2018_answers, hor_regrid_answers_2018 + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that + ! recover the remapping answers from 2018. If false, use more + ! robust forms of the same remapping expressions. + integer :: remap_answer_date ! The vintage of the order of arithmetic and expressions to use + ! for remapping. Values below 20190101 recover the remapping + ! answers from 2018, while higher values use more robust + ! forms of the same remapping expressions. + logical :: hor_regrid_answers_2018 + integer :: hor_regrid_answer_date ! The vintage of the order of arithmetic and expressions to use + ! for horizontal regridding. Values below 20190101 recover the + ! answers from 2018, while higher values use expressions that have + ! been rearranged for rotational invariance. logical :: pre_gridded logical :: separate_mixed_layer ! If true, handle the mixed layers differently. logical :: density_extrap_bug ! If true use an expression with a vertical indexing bug for @@ -2535,24 +2560,29 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just "If false, uses the preferred remapping algorithm for initialization. 
"//& "If true, use an older, less robust algorithm for remapping.", & default=.false., do_not_log=just_read) + call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) + default=(default_answer_date<20190101)) call get_param(PF, mdl, "TEMP_SALT_INIT_VERTICAL_REMAP_ONLY", pre_gridded, & "If true, initial conditions are on the model horizontal grid. " //& "Extrapolation over missing ocean values is done using an ICE-9 "//& "procedure with vertical ALE remapping .", & default=.false.) if (useALEremapping) then - call get_param(PF, mdl, "REMAPPING_2018_ANSWERS", answers_2018, & + call get_param(PF, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + remap_answer_date = 20190101 ; if (remap_answers_2018) remap_answer_date = 20181231 endif call get_param(PF, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizonal regridding that recovers "//& "the answers from the end of 2018. Otherwise, use rotationally symmetric "//& "forms of the same expressions.", default=default_2018_answers) + hor_regrid_answer_date = 20190101 ; if (hor_regrid_answers_2018) hor_regrid_answer_date = 20181231 if (.not.useALEremapping) then call get_param(PF, mdl, "ADJUST_THICKNESS", correct_thickness, & "If true, all mass below the bottom removed if the "//& @@ -2618,12 +2648,12 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just call horiz_interp_and_extrap_tracer(tfilename, potemp_var, US%degC_to_C, 1, & G, temp_z, mask_z, z_in, z_edges_in, missing_value_temp, reentrant_x, & - tripolar_n, homogenize, m_to_Z=US%m_to_Z, answers_2018=hor_regrid_answers_2018, & + tripolar_n, homogenize, m_to_Z=US%m_to_Z, answer_date=hor_regrid_answer_date, & ongrid=pre_gridded, tr_iter_tol=1.0e-3*US%degC_to_C) call horiz_interp_and_extrap_tracer(sfilename, salin_var, US%ppt_to_S, 1, & G, salt_z, mask_z, z_in, z_edges_in, missing_value_salt, reentrant_x, & - tripolar_n, homogenize, m_to_Z=US%m_to_Z, answers_2018=hor_regrid_answers_2018, & + tripolar_n, homogenize, m_to_Z=US%m_to_Z, answer_date=hor_regrid_answer_date, & ongrid=pre_gridded, tr_iter_tol=1.0e-3*US%ppt_to_S) kd = size(z_in,1) @@ -2701,7 +2731,7 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just endif ! Now remap from source grid to target grid, first setting reconstruction parameters - call initialize_remapping( remapCS, remappingScheme, boundary_extrapolation=.false., answers_2018=answers_2018 ) + call initialize_remapping( remapCS, remappingScheme, boundary_extrapolation=.false., answer_date=remap_answer_date ) if (remap_general) then call set_regrid_params( regridCS, min_thickness=0. 
) tv_loc = tv @@ -2719,9 +2749,9 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just deallocate( dz_interface ) endif call ALE_remap_scalar(remapCS, G, GV, nkd, h1, tmpT1dIn, h, tv%T, all_cells=remap_full_column, & - old_remap=remap_old_alg, answers_2018=answers_2018 ) + old_remap=remap_old_alg, answer_date=remap_answer_date ) call ALE_remap_scalar(remapCS, G, GV, nkd, h1, tmpS1dIn, h, tv%S, all_cells=remap_full_column, & - old_remap=remap_old_alg, answers_2018=answers_2018 ) + old_remap=remap_old_alg, answer_date=remap_answer_date ) deallocate( h1 ) deallocate( tmpT1dIn ) diff --git a/src/initialization/MOM_tracer_initialization_from_Z.F90 b/src/initialization/MOM_tracer_initialization_from_Z.F90 index 204a1e5f35..560a3ceef7 100644 --- a/src/initialization/MOM_tracer_initialization_from_Z.F90 +++ b/src/initialization/MOM_tracer_initialization_from_Z.F90 @@ -78,7 +78,20 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ real :: missing_value integer :: nPoints integer :: id_clock_routine, id_clock_ALE - logical :: answers_2018, default_2018_answers, hor_regrid_answers_2018 + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that + ! recover the remapping answers from 2018. If false, use more + ! robust forms of the same remapping expressions. + integer :: remap_answer_date ! The vintage of the order of arithmetic and expressions to use + ! for remapping. Values below 20190101 recover the remapping + ! answers from 2018, while higher values use more robust + ! forms of the same remapping expressions. + logical :: hor_regrid_answers_2018 + integer :: hor_regrid_answer_date ! The vintage of the order of arithmetic and expressions to use + ! for horizontal regridding. Values below 20190101 recover the + ! answers from 2018, while higher values use expressions that have + ! been rearranged for rotational invariance. logical :: reentrant_x, tripolar_n id_clock_routine = cpu_clock_id('(Initialize tracer from Z)', grain=CLOCK_ROUTINE) @@ -100,19 +113,28 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ call get_param(PF, mdl, "Z_INIT_REMAPPING_SCHEME", remapScheme, & "The remapping scheme to use if using Z_INIT_ALE_REMAPPING is True.", & default="PLM") + call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) + default=(default_answer_date<20190101)) if (useALE) then - call get_param(PF, mdl, "REMAPPING_2018_ANSWERS", answers_2018, & + call get_param(PF, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. 
Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + if (remap_answers_2018) then + remap_answer_date = 20181231 + else + remap_answer_date = 20190101 + endif endif call get_param(PF, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizonal regridding that recovers "//& "the answers from the end of 2018. Otherwise, use rotationally symmetric "//& "forms of the same expressions.", default=default_2018_answers) + hor_regrid_answer_date = 20190101 ; if (hor_regrid_answers_2018) hor_regrid_answer_date = 20181231 ! These are model grid properties, but being applied to the data grid for now. ! need to revisit this (mjh) @@ -129,7 +151,7 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ call horiz_interp_and_extrap_tracer(src_file, src_var_nam, convert, recnum, & G, tr_z, mask_z, z_in, z_edges_in, missing_value, reentrant_x, tripolar_n, & - homog, m_to_Z=US%m_to_Z, answers_2018=hor_regrid_answers_2018) + homog, m_to_Z=US%m_to_Z, answer_date=hor_regrid_answer_date) kd = size(z_edges_in,1)-1 call pass_var(tr_z,G%Domain) @@ -143,7 +165,7 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ allocate( h1(kd) ) allocate( hSrc(isd:ied,jsd:jed,kd) ) ! Set parameters for reconstructions - call initialize_remapping( remapCS, remapScheme, boundary_extrapolation=.false., answers_2018=answers_2018 ) + call initialize_remapping( remapCS, remapScheme, boundary_extrapolation=.false., answer_date=remap_answer_date ) ! Next we initialize the regridding package so that it knows about the target grid do j = js, je ; do i = is, ie @@ -168,7 +190,7 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ hSrc(i,j,:) = GV%Z_to_H * h1(:) enddo ; enddo - call ALE_remap_scalar(remapCS, G, GV, kd, hSrc, tr_z, h, tr, all_cells=.false., answers_2018=answers_2018 ) + call ALE_remap_scalar(remapCS, G, GV, kd, hSrc, tr_z, h, tr, all_cells=.false., answer_date=remap_answer_date ) deallocate( hSrc ) deallocate( h1 ) diff --git a/src/ocean_data_assim/MOM_oda_incupd.F90 b/src/ocean_data_assim/MOM_oda_incupd.F90 index 77f20c4f66..be57bbe748 100644 --- a/src/ocean_data_assim/MOM_oda_incupd.F90 +++ b/src/ocean_data_assim/MOM_oda_incupd.F90 @@ -230,8 +230,9 @@ subroutine initialize_oda_incupd( G, GV, US, param_file, CS, data_h, nz_data, re !### Doing a halo update here on CS%Ref_h%p would avoid needing halo updates each timestep. ! Call the constructor for remapping control structure + !### Revisit this hard-coded answer_date. call initialize_remapping(CS%remap_cs, remapScheme, boundary_extrapolation=bndExtrapolation, & - answers_2018=.false.) + answer_date=20190101) end subroutine initialize_oda_incupd diff --git a/src/parameterizations/lateral/MOM_lateral_mixing_coeffs.F90 b/src/parameterizations/lateral/MOM_lateral_mixing_coeffs.F90 index 0871737d20..0dd590c2d7 100644 --- a/src/parameterizations/lateral/MOM_lateral_mixing_coeffs.F90 +++ b/src/parameterizations/lateral/MOM_lateral_mixing_coeffs.F90 @@ -1156,17 +1156,26 @@ subroutine VarMix_init(Time, G, GV, US, param_file, diag, CS) type(param_file_type), intent(in) :: param_file !< Parameter file handles type(diag_ctrl), target, intent(inout) :: diag !< Diagnostics control structure type(VarMix_CS), intent(inout) :: CS !< Variable mixing coefficients + ! Local variables real :: KhTr_Slope_Cff, KhTh_Slope_Cff, oneOrTwo real :: N2_filter_depth ! 
A depth below which stratification is treated as monotonic when ! calculating the first-mode wave speed [Z ~> m] - real :: KhTr_passivity_coeff + real :: KhTr_passivity_coeff ! Coefficient setting the ratio between along-isopycnal tracer + ! mixing and interface height mixing [nondim] real :: absurdly_small_freq ! A miniscule frequency that is used to avoid division by 0 [T-1 ~> s-1]. The ! default value is roughly (pi / (the age of the universe)). logical :: Gill_equatorial_Ld, use_FGNV_streamfn, use_MEKE, in_use - logical :: default_2018_answers, remap_answers_2018 - real :: MLE_front_length - real :: Leith_Lap_const ! The non-dimensional coefficient in the Leith viscosity + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 + integer :: remap_answer_date ! The vintage of the order of arithmetic and expressions to use + ! for remapping. Values below 20190101 recover the remapping + ! answers from 2018, while higher values use more robust + ! forms of the same remapping expressions. + real :: MLE_front_length ! The frontal-length scale used to calculate the upscaling of + ! buoyancy gradients in boundary layer parameterizations [L ~> m] + real :: Leith_Lap_const ! The non-dimensional coefficient in the Leith viscosity [nondim] real :: grid_sp_u2, grid_sp_v2 ! Intermediate quantities for Leith metrics [L2 ~> m2] real :: grid_sp_u3, grid_sp_v3 ! Intermediate quantities for Leith metrics [L3 ~> m3] real :: wave_speed_min ! A floor in the first mode speed below which 0 is returned [L T-1 ~> m s-1] @@ -1175,7 +1184,7 @@ subroutine VarMix_init(Time, G, GV, US, param_file, diag, CS) ! scaled by the resolution function. logical :: better_speed_est ! If true, use a more robust estimate of the first ! mode wave speed as the starting point for iterations. -! This include declares and sets the variable "version". + ! This include declares and sets the variable "version". # include "version_variable.h" character(len=40) :: mdl = "MOM_lateral_mixing_coeffs" ! This module's name. integer :: is, ie, js, je, Isq, Ieq, Jsq, Jeq, i, j @@ -1263,7 +1272,7 @@ subroutine VarMix_init(Time, G, GV, US, param_file, diag, CS) default=0., do_not_log=.true.) CS%calculate_Rd_dx = CS%calculate_Rd_dx .or. (KhTr_passivity_coeff>0.) call get_param(param_file, mdl, "MLE_FRONT_LENGTH", MLE_front_length, & - default=0., do_not_log=.true.) + units="m", default=0.0, scale=US%m_to_L, do_not_log=.true.) CS%calculate_Rd_dx = CS%calculate_Rd_dx .or. (MLE_front_length>0.) call get_param(param_file, mdl, "DEBUG", CS%debug, default=.false., do_not_log=.true.) @@ -1532,13 +1541,21 @@ subroutine VarMix_init(Time, G, GV, US, param_file, diag, CS) if (CS%calculate_cg1) then in_use = .true. allocate(CS%cg1(isd:ied,jsd:jed), source=0.0) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) + default=(default_answer_date<20190101)) call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. 
Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + if (remap_answers_2018) then + remap_answer_date = 20181231 + else + remap_answer_date = 20190101 + endif call get_param(param_file, mdl, "INTERNAL_WAVE_SPEED_TOL", wave_speed_tol, & "The fractional tolerance for finding the wave speeds.", & units="nondim", default=0.001) @@ -1550,7 +1567,7 @@ subroutine VarMix_init(Time, G, GV, US, param_file, diag, CS) "If true, use a more robust estimate of the first mode wave speed as the "//& "starting point for iterations.", default=.true.) call wave_speed_init(CS%wave_speed, use_ebt_mode=CS%Resoln_use_ebt, & - mono_N2_depth=N2_filter_depth, remap_answers_2018=remap_answers_2018, & + mono_N2_depth=N2_filter_depth, remap_answer_date=remap_answer_date, & better_speed_est=better_speed_est, min_speed=wave_speed_min, & wave_speed_tol=wave_speed_tol) endif diff --git a/src/parameterizations/vertical/MOM_ALE_sponge.F90 b/src/parameterizations/vertical/MOM_ALE_sponge.F90 index 9409a07fc1..59b46b61cf 100644 --- a/src/parameterizations/vertical/MOM_ALE_sponge.F90 +++ b/src/parameterizations/vertical/MOM_ALE_sponge.F90 @@ -118,12 +118,14 @@ module MOM_ALE_sponge !! timing of diagnostic output. type(remapping_cs) :: remap_cs !< Remapping parameters and work arrays - logical :: remap_answers_2018 !< If true, use the order of arithmetic and expressions that - !! recover the answers for remapping from the end of 2018. - !! Otherwise, use more robust forms of the same expressions. - logical :: hor_regrid_answers_2018 !< If true, use the order of arithmetic for horizontal regridding - !! that recovers the answers from the end of 2018. Otherwise, use - !! rotationally symmetric forms of the same expressions. + integer :: remap_answer_date !< The vintage of the order of arithmetic and expressions to use + !! for remapping. Values below 20190101 recover the remapping + !! answers from 2018, while higher values use more robust + !! forms of the same remapping expressions. + integer :: hor_regrid_answer_date !< The vintage of the order of arithmetic and expressions to use + !! for horizontal regridding. Values below 20190101 recover the + !! answers from 2018, while higher values use expressions that have + !! been rearranged for rotational invariance. logical :: time_varying_sponges !< True if using newer sponge code logical :: spongeDataOngrid !< True if the sponge data are on the model horizontal grid @@ -173,7 +175,14 @@ subroutine initialize_ALE_sponge_fixed(Iresttime, G, GV, param_file, CS, data_h, character(len=64) :: remapScheme logical :: use_sponge logical :: bndExtrapolation = .true. ! If true, extrapolate boundaries - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that + ! recover the remapping answers from 2018. If false, use more + ! robust forms of the same remapping expressions. + logical :: hor_regrid_answers_2018 ! If true, use the order of arithmetic for horizontal regridding + ! that recovers the answers from the end of 2018. Otherwise, use + ! rotationally symmetric forms of the same expressions. 
integer :: i, j, k, col, total_sponge_cols, total_sponge_cols_u, total_sponge_cols_v if (associated(CS)) then @@ -208,17 +217,22 @@ subroutine initialize_ALE_sponge_fixed(Iresttime, G, GV, param_file, CS, data_h, "than PCM. E.g., if PPM is used for remapping, a "//& "PPM reconstruction will also be used within boundary cells.", & default=.false., do_not_log=.true.) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", CS%remap_answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - call get_param(param_file, mdl, "HOR_REGRID_2018_ANSWERS", CS%hor_regrid_answers_2018, & + CS%remap_answer_date = 20190101 ; if (remap_answers_2018) CS%remap_answer_date = 20181231 + call get_param(param_file, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizontal regridding that recovers "//& "the answers from the end of 2018. Otherwise, use rotationally symmetric "//& "forms of the same expressions.", default=default_2018_answers) + CS%hor_regrid_answer_date = 20190101 ; if (hor_regrid_answers_2018) CS%hor_regrid_answer_date = 20181231 call get_param(param_file, mdl, "REENTRANT_X", CS%reentrant_x, & "If true, the domain is zonally reentrant.", default=.true.) call get_param(param_file, mdl, "TRIPOLAR_N", CS%tripolar_N, & @@ -261,7 +275,7 @@ subroutine initialize_ALE_sponge_fixed(Iresttime, G, GV, param_file, CS, data_h, ! Call the constructor for remapping control structure call initialize_remapping(CS%remap_cs, remapScheme, boundary_extrapolation=bndExtrapolation, & - answers_2018=CS%remap_answers_2018) + answer_date=CS%remap_answer_date) call log_param(param_file, mdl, "!Total sponge columns at h points", total_sponge_cols, & "The total number of columns where sponges are applied at h points.", like_default=.true.) @@ -434,7 +448,14 @@ subroutine initialize_ALE_sponge_varying(Iresttime, G, GV, param_file, CS, Irest character(len=64) :: remapScheme logical :: use_sponge logical :: bndExtrapolation = .true. ! If true, extrapolate boundaries - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that + ! recover the remapping answers from 2018. If false, use more + ! robust forms of the same remapping expressions. + logical :: hor_regrid_answers_2018 ! If true, use the order of arithmetic for horizontal regridding + ! that recovers the answers from the end of 2018. Otherwise, use + ! rotationally symmetric forms of the same expressions. 
integer :: i, j, col, total_sponge_cols, total_sponge_cols_u, total_sponge_cols_v if (associated(CS)) then @@ -463,19 +484,24 @@ subroutine initialize_ALE_sponge_varying(Iresttime, G, GV, param_file, CS, Irest "than PCM. E.g., if PPM is used for remapping, a "//& "PPM reconstruction will also be used within boundary cells.", & default=.false., do_not_log=.true.) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", CS%remap_answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - call get_param(param_file, mdl, "HOR_REGRID_2018_ANSWERS", CS%hor_regrid_answers_2018, & + CS%remap_answer_date = 20190101 ; if (remap_answers_2018) CS%remap_answer_date = 20181231 + call get_param(param_file, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizontal regridding that recovers "//& "the answers from the end of 2018 and retain a bug in the 3-dimensional mask "//& "returned in certain cases. Otherwise, use rotationally symmetric "//& "forms of the same expressions and initialize the mask properly.", & default=default_2018_answers) + CS%hor_regrid_answer_date = 20190101 ; if (hor_regrid_answers_2018) CS%hor_regrid_answer_date = 20181231 call get_param(param_file, mdl, "SPONGE_DATA_ONGRID", CS%spongeDataOngrid, & "When defined, the incoming sponge data are "//& "assumed to be on the model grid " , & @@ -514,7 +540,7 @@ subroutine initialize_ALE_sponge_varying(Iresttime, G, GV, param_file, CS, Irest ! Call the constructor for remapping control structure call initialize_remapping(CS%remap_cs, remapScheme, boundary_extrapolation=bndExtrapolation, & - answers_2018=CS%remap_answers_2018) + answer_date=CS%remap_answer_date) call log_param(param_file, mdl, "!Total sponge columns at h points", total_sponge_cols, & "The total number of columns where sponges are applied at h points.", like_default=.true.) 
if (CS%sponge_uv) then @@ -868,7 +894,7 @@ subroutine apply_ALE_sponge(h, dt, G, GV, US, CS, Time) Idt = 1.0/dt - if (.not.CS%remap_answers_2018) then + if (CS%remap_answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 @@ -882,7 +908,7 @@ subroutine apply_ALE_sponge(h, dt, G, GV, US, CS, Time) call horiz_interp_and_extrap_tracer(CS%Ref_val(m)%id, Time, CS%Ref_val(m)%scale, G, sp_val, & mask_z, z_in, z_edges_in, missing_value, CS%reentrant_x, CS%tripolar_N, .false., & spongeOnGrid=CS%SpongeDataOngrid, m_to_Z=US%m_to_Z, & - answers_2018=CS%hor_regrid_answers_2018) + answer_date=CS%hor_regrid_answer_date) allocate( hsrc(nz_data) ) allocate( tmpT1d(nz_data) ) do c=1,CS%num_col @@ -966,7 +992,7 @@ subroutine apply_ALE_sponge(h, dt, G, GV, US, CS, Time) call horiz_interp_and_extrap_tracer(CS%Ref_val_u%id, Time, CS%Ref_val_u%scale, G, sp_val, & mask_z, z_in, z_edges_in, missing_value, CS%reentrant_x, CS%tripolar_N, .false., & spongeOnGrid=CS%SpongeDataOngrid, m_to_Z=US%m_to_Z,& - answers_2018=CS%hor_regrid_answers_2018) + answer_date=CS%hor_regrid_answer_date) ! Initialize mask_z halos to zero before pass_var, in case of no update mask_z(G%isc-1, G%jsc:G%jec, :) = 0. @@ -1015,7 +1041,7 @@ subroutine apply_ALE_sponge(h, dt, G, GV, US, CS, Time) call horiz_interp_and_extrap_tracer(CS%Ref_val_v%id, Time, CS%Ref_val_v%scale, G, sp_val, & mask_z, z_in, z_edges_in, missing_value, CS%reentrant_x, CS%tripolar_N, .false., & spongeOnGrid=CS%SpongeDataOngrid, m_to_Z=US%m_to_Z,& - answers_2018=CS%hor_regrid_answers_2018) + answer_date=CS%hor_regrid_answer_date) ! Initialize mask_z halos to zero before pass_var, in case of no update mask_z(G%isc:G%iec, G%jsc-1, :) = 0. mask_z(G%isc:G%iec, G%jec+1, :) = 0. diff --git a/src/parameterizations/vertical/MOM_tidal_mixing.F90 b/src/parameterizations/vertical/MOM_tidal_mixing.F90 index 85fc2abb7b..bd819a7a87 100644 --- a/src/parameterizations/vertical/MOM_tidal_mixing.F90 +++ b/src/parameterizations/vertical/MOM_tidal_mixing.F90 @@ -139,9 +139,11 @@ module MOM_tidal_mixing real :: tidal_diss_lim_tc !< CVMix-specific dissipation limit depth for !! tidal-energy-constituent data [Z ~> m]. type(remapping_CS) :: remap_CS !< The control structure for remapping - logical :: remap_answers_2018 = .true. !< If true, use the order of arithmetic and expressions that - !! recover the remapping answers from 2018. If false, use more - !! robust forms of the same remapping expressions. + integer :: remap_answer_date = 20181231 !< The vintage of the order of arithmetic and expressions to use + !! for remapping. Values below 20190101 recover the remapping + !! answers from 2018, while higher values use more robust + !! forms of the same remapping expressions. + !### Change to 99991231? type(int_tide_CS), pointer :: int_tide_CSp=> NULL() !< Control structure for a child module @@ -222,7 +224,11 @@ logical function tidal_mixing_init(Time, G, GV, US, param_file, int_tide_CSp, di logical :: use_CVMix_tidal logical :: int_tide_dissipation logical :: read_tideamp - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that + ! recover the remapping answers from 2018. If false, use more + ! 
robust forms of the same remapping expressions. character(len=20) :: tmpstr, int_tide_profile_str character(len=20) :: CVMix_tidal_scheme_str, tidal_energy_type character(len=200) :: filename, h2_file, Niku_TKE_input_file @@ -271,17 +277,25 @@ logical function tidal_mixing_init(Time, G, GV, US, param_file, int_tide_CSp, di call get_param(param_file, mdl, "INPUTDIR", CS%inputdir, default=".",do_not_log=.true.) CS%inputdir = slasher(CS%inputdir) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) + default=(default_answer_date<20190101)) call get_param(param_file, mdl, "TIDAL_MIXING_2018_ANSWERS", CS%answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", CS%remap_answers_2018, & + call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + if (remap_answers_2018) then + CS%remap_answer_date = 20181231 + else + CS%remap_answer_date = 20190101 + endif if (CS%int_tide_dissipation) then @@ -1651,7 +1665,7 @@ subroutine read_tidal_constituents(G, US, tidal_energy_file, CS) ! initialize input remapping: call initialize_remapping(CS%remap_cs, remapping_scheme="PLM", & boundary_extrapolation=.false., check_remapping=CS%debug, & - answers_2018=CS%remap_answers_2018) + answer_date=CS%remap_answer_date) deallocate(tc_m2) deallocate(tc_s2) diff --git a/src/tracer/MOM_lateral_boundary_diffusion.F90 b/src/tracer/MOM_lateral_boundary_diffusion.F90 index d52e2cde4c..e7e47370e1 100644 --- a/src/tracer/MOM_lateral_boundary_diffusion.F90 +++ b/src/tracer/MOM_lateral_boundary_diffusion.F90 @@ -124,8 +124,9 @@ logical function lateral_boundary_diffusion_init(Time, G, GV, param_file, diag, "for vertical remapping for all variables. "//& "It can be one of the following schemes: "//& trim(remappingSchemesDoc), default=remappingDefaultScheme) + !### Revisit this hard-coded answer_date. call initialize_remapping( CS%remap_CS, string, boundary_extrapolation = boundary_extrap ,& - check_reconstruction=.false., check_remapping=.false., answers_2018=.false.) + check_reconstruction=.false., check_remapping=.false., answer_date=20190101) call extract_member_remapping_CS(CS%remap_CS, degree=CS%deg) call get_param(param_file, mdl, "LBD_DEBUG", CS%debug, & "If true, write out verbose debugging data in the LBD module.", & diff --git a/src/tracer/MOM_neutral_diffusion.F90 b/src/tracer/MOM_neutral_diffusion.F90 index 3869610059..9cedfa8b57 100644 --- a/src/tracer/MOM_neutral_diffusion.F90 +++ b/src/tracer/MOM_neutral_diffusion.F90 @@ -100,9 +100,10 @@ module MOM_neutral_diffusion type(EOS_type), pointer :: EOS => NULL() !< Equation of state parameters type(remapping_CS) :: remap_CS !< Remapping control structure used to create sublayers - logical :: remap_answers_2018 !< If true, use the order of arithmetic and expressions that - !! 
recover the answers for remapping from the end of 2018. - !! Otherwise, use more robust forms of the same expressions. + integer :: remap_answer_date !< The vintage of the order of arithmetic and expressions to use + !! for remapping. Values below 20190101 recover the remapping + !! answers from 2018, while higher values use more robust + !! forms of the same remapping expressions. type(KPP_CS), pointer :: KPP_CSp => NULL() !< KPP control structure needed to get BLD type(energetic_PBL_CS), pointer :: energetic_PBL_CSp => NULL()!< ePBL control structure needed to get MLD end type neutral_diffusion_CS @@ -127,7 +128,11 @@ logical function neutral_diffusion_init(Time, G, GV, US, param_file, diag, EOS, ! Local variables character(len=80) :: string ! Temporary strings - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that + ! recover the answers for remapping from the end of 2018. + ! Otherwise, use more robust forms of the same expressions. logical :: boundary_extrap if (associated(CS)) then @@ -183,15 +188,23 @@ logical function neutral_diffusion_init(Time, G, GV, US, param_file, diag, EOS, "for vertical remapping for all variables. "//& "It can be one of the following schemes: "//& trim(remappingSchemesDoc), default=remappingDefaultScheme) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", CS%remap_answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + if (remap_answers_2018) then + CS%remap_answer_date = 20181231 + else + CS%remap_answer_date = 20190101 + endif call initialize_remapping( CS%remap_CS, string, boundary_extrapolation=boundary_extrap, & - answers_2018=CS%remap_answers_2018 ) + answer_date=CS%remap_answer_date ) call extract_member_remapping_CS(CS%remap_CS, degree=CS%deg) call get_param(param_file, mdl, "NEUTRAL_POS_METHOD", CS%neutral_pos_method, & "Method used to find the neutral position \n"// & @@ -333,7 +346,7 @@ subroutine neutral_diffusion_calc_coeffs(G, GV, US, h, T, S, CS, p_surf) h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff if (.not. CS%continuous_reconstruction) then - if (CS%remap_answers_2018) then + if (CS%remap_answer_date < 20190101) then if (GV%Boussinesq) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 else @@ -577,7 +590,7 @@ subroutine neutral_diffusion(G, GV, h, Coef_x, Coef_y, dt, Reg, US, CS) h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff if (.not. 
CS%continuous_reconstruction) then - if (CS%remap_answers_2018) then + if (CS%remap_answer_date < 20190101) then h_neglect = GV%m_to_H*1.0e-30 ; h_neglect_edge = GV%m_to_H*1.0e-10 endif endif From 3258b43c428774c6d58740fbf2779043e0e10670 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Sat, 30 Jul 2022 05:54:24 -0400 Subject: [PATCH 22/40] +Eliminate unused answers_2018 optional arguments Eliminated the now unused answers_2018 optional arguments in a variety of the ALE-related subroutines that are only called from code in the ALE directory. The functionality previously provided by answers_2018 is now provided by the more flexible answer_date arguments. A handful of spelling errors were also corrected in comments in the files that were edited. All answers are bitwise identical. --- src/ALE/MOM_remapping.F90 | 2 +- src/ALE/P1M_functions.F90 | 8 +++--- src/ALE/P3M_functions.F90 | 12 ++++----- src/ALE/PCM_functions.F90 | 2 +- src/ALE/PLM_functions.F90 | 4 +-- src/ALE/PPM_functions.F90 | 12 ++++----- src/ALE/PQM_functions.F90 | 14 +++++----- src/ALE/regrid_edge_values.F90 | 48 ++++++++++++++-------------------- src/ALE/regrid_solvers.F90 | 12 +++------ 9 files changed, 46 insertions(+), 68 deletions(-) diff --git a/src/ALE/MOM_remapping.F90 b/src/ALE/MOM_remapping.F90 index 9979b5d39b..faed4ac6be 100644 --- a/src/ALE/MOM_remapping.F90 +++ b/src/ALE/MOM_remapping.F90 @@ -1896,7 +1896,7 @@ logical function remapping_unit_tests(verbose) call edge_values_explicit_h4( 5, (/1.,1.,1.,1.,1./), (/1.,1.,7.,19.,37./), ppoly0_E, & h_neglect=1e-10, answer_date=answer_date ) - ! The next two tests are now passing when answers_2018 = .false., but otherwise only work to roundoff. + ! The next two tests are now passing when answer_date >= 20190101, but otherwise only work to roundoff. thisTest = test_answer(v, 5, ppoly0_E(:,1), (/3.,0.,3.,12.,27./), 'Parabola H4: left edges', tol=2.7e-14) remapping_unit_tests = remapping_unit_tests .or. thisTest thisTest = test_answer(v, 5, ppoly0_E(:,2), (/0.,3.,12.,27.,48./), 'Parabola H4: right edges', tol=4.8e-14) diff --git a/src/ALE/P1M_functions.F90 b/src/ALE/P1M_functions.F90 index 281971cca4..b17b35c85c 100644 --- a/src/ALE/P1M_functions.F90 +++ b/src/ALE/P1M_functions.F90 @@ -24,7 +24,7 @@ module P1M_functions !! !! It is assumed that the size of the array 'u' is equal to the number of cells !! defining 'grid' and 'ppoly'. No consistency check is performed here. -subroutine P1M_interpolation( N, h, u, edge_values, ppoly_coef, h_neglect, answers_2018, answer_date ) +subroutine P1M_interpolation( N, h, u, edge_values, ppoly_coef, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell average properties (size N) [A] @@ -32,7 +32,6 @@ subroutine P1M_interpolation( N, h, u, edge_values, ppoly_coef, h_neglect, answe real, dimension(:,:), intent(inout) :: ppoly_coef !< Potentially modified !! piecewise polynomial coefficients, mainly [A] real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables @@ -40,8 +39,7 @@ subroutine P1M_interpolation( N, h, u, edge_values, ppoly_coef, h_neglect, answe real :: u0_l, u0_r ! edge values (left and right) ! 
Bound edge values (routine found in 'edge_values.F90') - call bound_edge_values( N, h, u, edge_values, h_neglect, & - answers_2018=answers_2018, answer_date=answer_date ) + call bound_edge_values( N, h, u, edge_values, h_neglect, answer_date=answer_date ) ! Systematically average discontinuous edge values (routine found in ! 'edge_values.F90') @@ -155,7 +153,7 @@ end subroutine P1M_boundary_extrapolation !! linearly interpolating between them. ! !! Once the edge values are estimated, the limiting process takes care of -!! ensuring that (1) edge values are bounded by neighoring cell averages +!! ensuring that (1) edge values are bounded by neighboring cell averages !! and (2) discontinuous edge values are averaged in order to provide a !! fully continuous interpolant throughout the domain. This last step is !! essential for the regridding problem to yield a unique solution. diff --git a/src/ALE/P3M_functions.F90 b/src/ALE/P3M_functions.F90 index 4d39542337..6039b197fb 100644 --- a/src/ALE/P3M_functions.F90 +++ b/src/ALE/P3M_functions.F90 @@ -25,7 +25,7 @@ module P3M_functions !! !! It is assumed that the size of the array 'u' is equal to the number of cells !! defining 'grid' and 'ppoly'. No consistency check is performed here. -subroutine P3M_interpolation( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answers_2018, answer_date ) +subroutine P3M_interpolation( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell averages (size N) in arbitrary units [A] @@ -34,7 +34,6 @@ subroutine P3M_interpolation( N, h, u, edge_values, ppoly_S, ppoly_coef, h_negle real, dimension(:,:), intent(inout) :: ppoly_coef !< Coefficients of polynomial [A] real, optional, intent(in) :: h_neglect !< A negligibly small width for the !! purpose of cell reconstructions [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Call the limiter for p3m, which takes care of everything from @@ -43,7 +42,7 @@ subroutine P3M_interpolation( N, h, u, edge_values, ppoly_S, ppoly_coef, h_negle ! 'P3M_interpolation' first but we do that to provide an homogeneous ! interface. call P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, & - answers_2018=answers_2018, answer_date=answer_date ) + answer_date=answer_date ) end subroutine P3M_interpolation @@ -60,7 +59,7 @@ end subroutine P3M_interpolation !! c. If not, monotonize cubic curve and rebuild it !! !! Step 3 of the monotonization process leaves all edge values unchanged. -subroutine P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answers_2018, answer_date ) +subroutine P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell averages (size N) in arbitrary units [A] @@ -69,7 +68,6 @@ subroutine P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, an real, dimension(:,:), intent(inout) :: ppoly_coef !< Coefficients of polynomial [A] real, optional, intent(in) :: h_neglect !< A negligibly small width for !! the purpose of cell reconstructions [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. 
integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables @@ -89,7 +87,7 @@ subroutine P3M_limiter( N, h, u, edge_values, ppoly_S, ppoly_coef, h_neglect, an eps = 1e-10 ! 1. Bound edge values (boundary cells are assumed to be local extrema) - call bound_edge_values( N, h, u, edge_values, hNeglect, answers_2018=answers_2018, answer_date=answer_date ) + call bound_edge_values( N, h, u, edge_values, hNeglect, answer_date=answer_date ) ! 2. Systematically average discontinuous edge values call average_discontinuous_edge_values( N, edge_values ) @@ -386,7 +384,7 @@ end subroutine build_cubic_interpolant !! Hence, we check whether the roots (if any) lie inside this interval. If there !! is no root or if both roots lie outside this interval, the cubic is monotonic. logical function is_cubic_monotonic( ppoly_coef, k ) - real, dimension(:,:), intent(in) :: ppoly_coef !< Coefficients of cubic polynomial in arbitary units [A] + real, dimension(:,:), intent(in) :: ppoly_coef !< Coefficients of cubic polynomial in arbitrary units [A] integer, intent(in) :: k !< The index of the cell to work on ! Local variables real :: a, b, c ! Coefficients of the first derivative of the cubic [A] diff --git a/src/ALE/PCM_functions.F90 b/src/ALE/PCM_functions.F90 index 6608e85eda..4f64e4a96d 100644 --- a/src/ALE/PCM_functions.F90 +++ b/src/ALE/PCM_functions.F90 @@ -42,7 +42,7 @@ end subroutine PCM_reconstruction !! Date of creation: 2008.06.06 !! L. White !! -!! This module contains routines that handle one-dimensionnal finite volume +!! This module contains routines that handle one-dimensional finite volume !! reconstruction using the piecewise constant method (PCM). end module PCM_functions diff --git a/src/ALE/PLM_functions.F90 b/src/ALE/PLM_functions.F90 index 9defeb9215..bc7f100a04 100644 --- a/src/ALE/PLM_functions.F90 +++ b/src/ALE/PLM_functions.F90 @@ -156,7 +156,7 @@ real elemental pure function PLM_monotonized_slope(u_l, u_c, u_r, s_l, s_c, s_r) end function PLM_monotonized_slope !> Returns a PLM slope using h2 extrapolation from a cell to the left. -!! Use the negative to extrapolate from the a cell to the right. +!! Use the negative to extrapolate from the cell to the right. real elemental pure function PLM_extrapolate_slope(h_l, h_c, h_neglect, u_l, u_c) real, intent(in) :: h_l !< Thickness of left cell [units of grid thickness] real, intent(in) :: h_c !< Thickness of center cell [units of grid thickness] @@ -305,7 +305,7 @@ end subroutine PLM_boundary_extrapolation !! Date of creation: 2008.06.06 !! L. White !! -!! This module contains routines that handle one-dimensionnal finite volume +!! This module contains routines that handle one-dimensional finite volume !! reconstruction using the piecewise linear method (PLM). end module PLM_functions diff --git a/src/ALE/PPM_functions.F90 b/src/ALE/PPM_functions.F90 index 16441565ac..aa24806d68 100644 --- a/src/ALE/PPM_functions.F90 +++ b/src/ALE/PPM_functions.F90 @@ -25,14 +25,13 @@ module PPM_functions contains !> Builds quadratic polynomials coefficients from cell mean and edge values. 
-subroutine PPM_reconstruction( N, h, u, edge_values, ppoly_coef, h_neglect, answers_2018, answer_date) +subroutine PPM_reconstruction( N, h, u, edge_values, ppoly_coef, h_neglect, answer_date) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< Cell widths [H] real, dimension(N), intent(in) :: u !< Cell averages [A] real, dimension(N,2), intent(inout) :: edge_values !< Edge values [A] real, dimension(N,3), intent(inout) :: ppoly_coef !< Polynomial coefficients, mainly [A] real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables @@ -40,7 +39,7 @@ subroutine PPM_reconstruction( N, h, u, edge_values, ppoly_coef, h_neglect, answ real :: edge_l, edge_r ! Edge values (left and right) ! PPM limiter - call PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018=answers_2018, answer_date=answer_date ) + call PPM_limiter_standard( N, h, u, edge_values, h_neglect, answer_date=answer_date ) ! Loop over all cells do k = 1,N @@ -60,13 +59,12 @@ end subroutine PPM_reconstruction !> Adjusts edge values using the standard PPM limiter (Colella & Woodward, JCP 1984) !! after first checking that the edge values are bounded by neighbors cell averages !! and that the edge values are monotonic between cell averages. -subroutine PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018, answer_date ) +subroutine PPM_limiter_standard( N, h, u, edge_values, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(:), intent(in) :: h !< cell widths (size N) [H] real, dimension(:), intent(in) :: u !< cell average properties (size N) [A] real, dimension(:,:), intent(inout) :: edge_values !< Potentially modified edge values [A] real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables @@ -76,7 +74,7 @@ subroutine PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018, real :: expr1, expr2 ! Bound edge values - call bound_edge_values( N, h, u, edge_values, h_neglect, answers_2018=answers_2018, answer_date=answer_date ) + call bound_edge_values( N, h, u, edge_values, h_neglect, answer_date=answer_date ) ! Make discontinuous edge values monotonic call check_discontinuous_edge_values( N, u, edge_values ) @@ -112,7 +110,7 @@ subroutine PPM_limiter_standard( N, h, u, edge_values, h_neglect, answers_2018, endif ! This checks that the difference in edge values is representable ! and avoids overshoot problems due to round off. - !### The 1.e-60 needs to have units of [A], so this dimensionally inconsisent. + !### The 1.e-60 needs to have units of [A], so this dimensionally inconsistent. if ( abs( edge_r - edge_l ) Edge value estimation for high-order resconstruction +!> Edge value estimation for high-order reconstruction module regrid_edge_values ! This file is part of MOM6. See LICENSE.md for the license. @@ -40,26 +40,24 @@ module regrid_edge_values !! !! Both boundary edge values are set equal to the boundary cell averages. !! Any extrapolation scheme is applied after this routine has been called. -!! Therefore, boundary cells are treated as if they were local extrama. 
-subroutine bound_edge_values( N, h, u, edge_val, h_neglect, answers_2018, answer_date ) +!! Therefore, boundary cells are treated as if they were local extrema. +subroutine bound_edge_values( N, h, u, edge_val, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] real, dimension(N,2), intent(inout) :: edge_val !< Potentially modified edge values [A]; the !! second index is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables real :: sigma_l, sigma_c, sigma_r ! left, center and right van Leer slopes [A H-1] or [A] real :: slope_x_h ! retained PLM slope times half grid step [A] real :: hNeglect ! A negligible thickness [H]. - logical :: use_2018_answers ! If true use older, less acccurate expressions. + logical :: use_2018_answers ! If true use older, less accurate expressions. integer :: k, km1, kp1 ! Loop index and the values to either side. - use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 - if (present(answer_date)) use_2018_answers = (answer_date < 20190101) + use_2018_answers = .true. ; if (present(answer_date)) use_2018_answers = (answer_date < 20190101) if (use_2018_answers) then hNeglect = hNeglect_dflt ; if (present(h_neglect)) hNeglect = h_neglect endif @@ -221,23 +219,22 @@ end subroutine edge_values_explicit_h2 !! available interpolant. !! !! For this fourth-order scheme, at least four cells must exist. -subroutine edge_values_explicit_h4( N, h, u, edge_val, h_neglect, answers_2018, answer_date ) +subroutine edge_values_explicit_h4( N, h, u, edge_val, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] real, dimension(N,2), intent(inout) :: edge_val !< Returned edge values [A]; the second index !! is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables real :: h0, h1, h2, h3 ! temporary thicknesses [H] real :: h_min ! A minimal cell width [H] real :: f1, f2, f3 ! auxiliary variables with various units - real :: et1, et2, et3 ! terms the expresson for edge values [A H] + real :: et1, et2, et3 ! terms the expression for edge values [A H] real :: I_h12 ! The inverse of the sum of the two central thicknesses [H-1] - real :: I_h012, I_h123 ! Inverses of sums of three succesive thicknesses [H-1] + real :: I_h012, I_h123 ! Inverses of sums of three successive thicknesses [H-1] real :: I_den_et2, I_den_et3 ! Inverses of denominators in edge value terms [H-2] real, dimension(5) :: x ! Coordinate system with 0 at edges [H] real, dimension(4) :: dz ! A temporary array of limited layer thicknesses [H] @@ -248,10 +245,9 @@ subroutine edge_values_explicit_h4( N, h, u, edge_val, h_neglect, answers_2018, real, dimension(4) :: B, C real :: hNeglect ! A negligible thickness in the same units as h. integer :: i, j - logical :: use_2018_answers ! 
If true use older, less acccurate expressions. + logical :: use_2018_answers ! If true use older, less accurate expressions. - use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 - if (present(answer_date)) use_2018_answers = (answer_date < 20190101) + use_2018_answers = .true. ; if (present(answer_date)) use_2018_answers = (answer_date < 20190101) if (use_2018_answers) then hNeglect = hNeglect_edge_dflt ; if (present(h_neglect)) hNeglect = h_neglect else @@ -387,14 +383,13 @@ end subroutine edge_values_explicit_h4 !! !! There are N+1 unknowns and we are able to write N-1 equations. The !! boundary conditions close the system. -subroutine edge_values_implicit_h4( N, h, u, edge_val, h_neglect, answers_2018, answer_date ) +subroutine edge_values_implicit_h4( N, h, u, edge_val, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] real, dimension(N,2), intent(inout) :: edge_val !< Returned edge values [A]; the second index !! is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables @@ -421,10 +416,9 @@ subroutine edge_values_implicit_h4( N, h, u, edge_val, h_neglect, answers_2018, tri_b, & ! tridiagonal system (right hand side) [A] tri_x ! tridiagonal system (solution vector) [A] real :: hNeglect ! A negligible thickness [H] - logical :: use_2018_answers ! If true use older, less acccurate expressions. + logical :: use_2018_answers ! If true use older, less accurate expressions. - use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 - if (present(answer_date)) use_2018_answers = (answer_date < 20190101) + use_2018_answers = .true. ; if (present(answer_date)) use_2018_answers = (answer_date < 20190101) if (use_2018_answers) then hNeglect = hNeglect_edge_dflt ; if (present(h_neglect)) hNeglect = h_neglect else @@ -590,7 +584,7 @@ subroutine end_value_h4(dz, u, Csys) ! Csys(4) = ((u(2)-u(1)) - 2.0 * (u(3)-u(2)) + (u(4)-u(3))) * (0.5*C1_3) ! else - ! Express the coefficients as sums of the differences between properties of succesive layers. + ! Express the coefficients as sums of the differences between properties of successive layers. h1 = dz(1) ; h2 = dz(2) ; h3 = dz(3) ; h4 = dz(4) ! Some of the weights used below are proportional to (h1/(h2+h3))**2 or (h1/(h2+h3))*(h2/(h3+h4)) @@ -697,14 +691,13 @@ end subroutine end_value_h4 !! !! There are N+1 unknowns and we are able to write N-1 equations. The !! boundary conditions close the system. -subroutine edge_slopes_implicit_h3( N, h, u, edge_slopes, h_neglect, answers_2018, answer_date ) +subroutine edge_slopes_implicit_h3( N, h, u, edge_slopes, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] real, dimension(N,2), intent(inout) :: edge_slopes !< Returned edge slopes [A H-1]; the !! second index is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. 
integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables @@ -733,12 +726,11 @@ subroutine edge_slopes_implicit_h3( N, h, u, edge_slopes, h_neglect, answers_201 tri_x ! tridiagonal system (solution vector) [A H-1] real :: hNeglect ! A negligible thickness [H]. real :: hNeglect3 ! hNeglect^3 [H3]. - logical :: use_2018_answers ! If true use older, less acccurate expressions. + logical :: use_2018_answers ! If true use older, less accurate expressions. hNeglect = hNeglect_dflt ; if (present(h_neglect)) hNeglect = h_neglect hNeglect3 = hNeglect**3 - use_2018_answers = .true. ; if (present(answers_2018)) use_2018_answers = answers_2018 - if (present(answer_date)) use_2018_answers = (answer_date < 20190101) + use_2018_answers = .true. ; if (present(answer_date)) use_2018_answers = (answer_date < 20190101) ! Loop on cells (except last one) do i = 1,N-1 @@ -869,14 +861,13 @@ end subroutine edge_slopes_implicit_h3 !------------------------------------------------------------------------------ !> Compute ih5 edge slopes (implicit fifth order accurate) -subroutine edge_slopes_implicit_h5( N, h, u, edge_slopes, h_neglect, answers_2018, answer_date ) +subroutine edge_slopes_implicit_h5( N, h, u, edge_slopes, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties in arbitrary units [A] real, dimension(N,2), intent(inout) :: edge_slopes !< Returned edge slopes [A H-1]; the !! second index is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! ----------------------------------------------------------------------------- @@ -1141,14 +1132,13 @@ end subroutine edge_slopes_implicit_h5 !! become computationally expensive if regridding is carried out !! often. Figuring out closed-form expressions for these coefficients !! on nonuniform meshes turned out to be intractable. -subroutine edge_values_implicit_h6( N, h, u, edge_val, h_neglect, answers_2018, answer_date ) +subroutine edge_values_implicit_h6( N, h, u, edge_val, h_neglect, answer_date ) integer, intent(in) :: N !< Number of cells real, dimension(N), intent(in) :: h !< cell widths [H] real, dimension(N), intent(in) :: u !< cell average properties (size N) in arbitrary units [A] real, dimension(N,2), intent(inout) :: edge_val !< Returned edge values [A]; the second index !! is for the two edges of each cell. real, optional, intent(in) :: h_neglect !< A negligibly small width [H] - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables diff --git a/src/ALE/regrid_solvers.F90 b/src/ALE/regrid_solvers.F90 index 022946a29d..0655d31062 100644 --- a/src/ALE/regrid_solvers.F90 +++ b/src/ALE/regrid_solvers.F90 @@ -16,12 +16,11 @@ module regrid_solvers !! This routine uses Gauss's algorithm to transform the system's original !! matrix into an upper triangular matrix. Back substitution yields the answer. !! The matrix A must be square, with the first index varing down the column. 
-subroutine solve_linear_system( A, R, X, N, answers_2018, answer_date ) +subroutine solve_linear_system( A, R, X, N, answer_date ) integer, intent(in) :: N !< The size of the system real, dimension(N,N), intent(inout) :: A !< The matrix being inverted [nondim] real, dimension(N), intent(inout) :: R !< system right-hand side [A] real, dimension(N), intent(inout) :: X !< solution vector [A] - logical, optional, intent(in) :: answers_2018 !< If true or absent use older, less efficient expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables real, parameter :: eps = 0.0 ! Minimum pivot magnitude allowed @@ -32,8 +31,7 @@ subroutine solve_linear_system( A, R, X, N, answers_2018, answer_date ) logical :: old_answers ! If true, use expressions that give the original (2008 through 2018) MOM6 answers integer :: i, j, k - old_answers = .true. ; if (present(answers_2018)) old_answers = answers_2018 - if (present(answer_date)) old_answers = (answer_date < 20190101) + old_answers = .true. ; if (present(answer_date)) old_answers = (answer_date < 20190101) ! Loop on rows to transform the problem into multiplication by an upper-right matrix. do i = 1,N-1 @@ -175,14 +173,13 @@ end subroutine linear_solver !! !! This routine uses Thomas's algorithm to solve the tridiagonal system AX = R. !! (A is made up of lower, middle and upper diagonals) -subroutine solve_tridiagonal_system( Al, Ad, Au, R, X, N, answers_2018, answer_date ) +subroutine solve_tridiagonal_system( Al, Ad, Au, R, X, N, answer_date ) integer, intent(in) :: N !< The size of the system real, dimension(N), intent(in) :: Ad !< Matrix center diagonal real, dimension(N), intent(in) :: Al !< Matrix lower diagonal real, dimension(N), intent(in) :: Au !< Matrix upper diagonal real, dimension(N), intent(in) :: R !< system right-hand side real, dimension(N), intent(out) :: X !< solution vector - logical, optional, intent(in) :: answers_2018 !< If true use older, less acccurate expressions. integer, optional, intent(in) :: answer_date !< The vintage of the expressions to use ! Local variables real, dimension(N) :: pivot, Al_piv @@ -191,8 +188,7 @@ subroutine solve_tridiagonal_system( Al, Ad, Au, R, X, N, answers_2018, answer_d integer :: k ! Loop index logical :: old_answers ! If true, use expressions that give the original (2008 through 2018) MOM6 answers - old_answers = .true. ; if (present(answers_2018)) old_answers = answers_2018 - if (present(answer_date)) old_answers = (answer_date < 20190101) + old_answers = .true. ; if (present(answer_date)) old_answers = (answer_date < 20190101) if (old_answers) then ! This version gives the same answers as the original (2008 through 2018) MOM6 code From 51cc772fa72133d2fbf01ca40d94464d48138883 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Sat, 30 Jul 2022 09:18:07 -0400 Subject: [PATCH 23/40] +Add the runtime parameter REMAPPING_ANSWER_DATE Added the new runtime parameter REMAPPING_ANSWER_DATE, which takes precedence over the older parameter REMAPPING_2018_ANSWERS. There are 11 files with get_param calls for this new parameter. Also started logging the value of DEFAULT_ANSWER_DATE. All answers are bitwise identical, but there are new entries in the MOM_parameter_doc.all files. 
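
Each of the 11 files touched below follows the same recipe: read DEFAULT_ANSWER_DATE (now logged rather than read silently), read the existing *_2018_ANSWERS flag with a default derived from that date, reconcile the two, and then read the new *_ANSWER_DATE parameter, which takes precedence. The condensed sketch below shows that precedence; the wrapper subroutine is hypothetical and the descriptive strings are abbreviated from the MOM_ALE.F90 hunk that follows.

subroutine read_remap_answer_date_sketch(param_file, answer_date)
  use MOM_file_parser, only : get_param, param_file_type
  type(param_file_type), intent(in)  :: param_file   ! Run-time parameter handle
  integer,               intent(out) :: answer_date  ! Selected vintage for remapping

  character(len=*), parameter :: mdl = "sketch"  ! Module name used in the parameter log
  integer :: default_answer_date  ! Default for the various _ANSWER_DATE parameters
  logical :: answers_2018         ! Older logical flag that the new parameter supersedes

  ! The global default answer date is read first and now appears in the parameter docs.
  call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, &
                 "This sets the default value for the various _ANSWER_DATE parameters.", &
                 default=99991231)
  call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", answers_2018, &
                 "If true, use the expressions from the end of 2018.", &
                 default=(default_answer_date < 20190101))

  ! Reconcile the older flag with the default date, then let the new parameter win.
  if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231
  if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101
  call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", answer_date, &
                 "The vintage of the expressions and order of arithmetic to use for remapping.", &
                 default=default_answer_date)
end subroutine read_remap_answer_date_sketch

With this ordering, an explicit REMAPPING_ANSWER_DATE entry in the input file always takes precedence, while runs that only set the older *_2018_ANSWERS flag reproduce their previous behavior, which is why all answers remain bitwise identical.
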
--- src/ALE/MOM_ALE.F90 | 20 +++++++----- src/ALE/MOM_regridding.F90 | 15 +++++++-- src/core/MOM_open_boundary.F90 | 17 ++++++---- src/diagnostics/MOM_diagnostics.F90 | 17 ++++++---- src/framework/MOM_diag_mediator.F90 | 17 ++++++---- .../MOM_state_initialization.F90 | 31 ++++++++++++++++--- .../MOM_tracer_initialization_from_Z.F90 | 19 ++++++++---- .../lateral/MOM_lateral_mixing_coeffs.F90 | 18 +++++++---- .../vertical/MOM_ALE_sponge.F90 | 30 +++++++++++++++--- .../vertical/MOM_tidal_mixing.F90 | 19 ++++++++---- src/tracer/MOM_neutral_diffusion.F90 | 17 ++++++---- 11 files changed, 160 insertions(+), 60 deletions(-) diff --git a/src/ALE/MOM_ALE.F90 b/src/ALE/MOM_ALE.F90 index e614391b4a..ca3b9d54de 100644 --- a/src/ALE/MOM_ALE.F90 +++ b/src/ALE/MOM_ALE.F90 @@ -94,7 +94,7 @@ module MOM_ALE integer :: answer_date !< The vintage of the expressions and order of arithmetic to use for !! remapping. Values below 20190101 result in the use of older, less !! accurate expressions that were in use at the end of 2018. Higher - !! values result inthe use of more robust and accurate forms of + !! values result in the use of more robust and accurate forms of !! mathematically equivalent expressions. logical :: debug !< If true, write verbose checksums for debugging purposes. @@ -226,7 +226,7 @@ subroutine ALE_init( param_file, GV, US, max_depth, CS) "extrapolated instead of piecewise constant", default=.false.) call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -234,11 +234,17 @@ subroutine ALE_init( param_file, GV, US, max_depth, CS) "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - if (answers_2018) then - CS%answer_date = 20181231 - else - CS%answer_date = 20190101 - endif + ! Revise inconsistent default answer dates for remapping. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", CS%answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) + call initialize_remapping( CS%remapCS, string, & boundary_extrapolation=remap_boundary_extrap, & check_reconstruction=check_reconstruction, & diff --git a/src/ALE/MOM_regridding.F90 b/src/ALE/MOM_regridding.F90 index 2f28362fb1..e5ce4019ba 100644 --- a/src/ALE/MOM_regridding.F90 +++ b/src/ALE/MOM_regridding.F90 @@ -208,6 +208,7 @@ subroutine initialize_regridding(CS, GV, US, max_depth, param_file, mdl, coord_m integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. logical :: default_2018_answers ! 
The default setting for the various 2018_ANSWERS flags. logical :: remap_answers_2018 + integer :: remap_answer_date ! The vintage of the remapping expressions to use. real :: filt_len, strat_tol, tmpReal, P_Ref real :: maximum_depth ! The maximum depth of the ocean [m] (not in Z). real :: dz_fixed_sfc, Rho_avg_depth, nlay_sfc_int @@ -269,7 +270,7 @@ subroutine initialize_regridding(CS, GV, US, max_depth, param_file, mdl, coord_m call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -277,7 +278,17 @@ subroutine initialize_regridding(CS, GV, US, max_depth, param_file, mdl, coord_m "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - call set_regrid_params(CS, remap_answers_2018=remap_answers_2018) + ! Revise inconsistent default answer dates for remapping. + if (remap_answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.remap_answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) + call set_regrid_params(CS, remap_answer_date=remap_answer_date) endif if (main_parameters .and. coord_is_state_dependent) then diff --git a/src/core/MOM_open_boundary.F90 b/src/core/MOM_open_boundary.F90 index 5a011c9101..edaa2bc1d8 100644 --- a/src/core/MOM_open_boundary.F90 +++ b/src/core/MOM_open_boundary.F90 @@ -625,7 +625,7 @@ subroutine open_boundary_config(G, US, param_file, OBC) "round off.", default=.false.,do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -633,11 +633,16 @@ subroutine open_boundary_config(G, US, param_file, OBC) "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - if (answers_2018) then - OBC%remap_answer_date = 20181231 - else - OBC%remap_answer_date = 20190101 - endif + ! Revise inconsistent default answer dates for remapping. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. 
(default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", OBC%remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) allocate(OBC%remap_CS) call initialize_remapping(OBC%remap_CS, remappingScheme, boundary_extrapolation = .false., & diff --git a/src/diagnostics/MOM_diagnostics.F90 b/src/diagnostics/MOM_diagnostics.F90 index 521d55115c..ad51ecfe5e 100644 --- a/src/diagnostics/MOM_diagnostics.F90 +++ b/src/diagnostics/MOM_diagnostics.F90 @@ -1592,7 +1592,7 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag "starting point for iterations.", default=.true.) call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -1600,11 +1600,16 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - if (remap_answers_2018) then - remap_answer_date = 20181231 - else - remap_answer_date = 20190101 - endif + ! Revise inconsistent default answer dates for remapping. + if (remap_answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.remap_answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) call get_param(param_file, mdl, "SPLIT", split, default=.true., do_not_log=.true.) diff --git a/src/framework/MOM_diag_mediator.F90 b/src/framework/MOM_diag_mediator.F90 index 65725ca59c..fbfd4e3976 100644 --- a/src/framework/MOM_diag_mediator.F90 +++ b/src/framework/MOM_diag_mediator.F90 @@ -3181,7 +3181,7 @@ subroutine diag_mediator_init(G, GV, US, nz, param_file, diag_cs, doc_file_dir) default=1) call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) 
+ default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -3189,11 +3189,16 @@ subroutine diag_mediator_init(G, GV, US, nz, param_file, diag_cs, doc_file_dir) "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - if (remap_answers_2018) then - remap_answer_date = 20181231 - else - remap_answer_date = 20190101 - endif + ! Revise inconsistent default answer dates for remapping. + if (remap_answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.remap_answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) call get_param(param_file, mdl, 'USE_GRID_SPACE_DIAGNOSTIC_AXES', diag_cs%grid_space_axes, & 'If true, use a grid index coordinate convention for diagnostic axes. ',& default=.false.) diff --git a/src/initialization/MOM_state_initialization.F90 b/src/initialization/MOM_state_initialization.F90 index f6d39497a8..ab163a55fd 100644 --- a/src/initialization/MOM_state_initialization.F90 +++ b/src/initialization/MOM_state_initialization.F90 @@ -1198,11 +1198,10 @@ subroutine trim_for_ice(PF, G, GV, US, ALE_CSp, tv, h, just_read) call get_param(PF, mdl, "TRIMMING_USES_REMAPPING", use_remapping, & 'When trimming the column, also remap T and S.', & default=.false., do_not_log=just_read) - remap_answers_2018 = .true. if (use_remapping) then call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -1210,8 +1209,19 @@ subroutine trim_for_ice(PF, G, GV, US, ALE_CSp, tv, h, just_read) "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + ! Revise inconsistent default answer dates for remapping. + if (remap_answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.remap_answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(PF, mdl, "REMAPPING_ANSWER_DATE", remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. 
"//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) + else + remap_answer_date = 20181231 endif - remap_answer_date = 20190101 ; if (remap_answers_2018) remap_answer_date = 20181231 if (just_read) return ! All run-time parameters have been read, so return. @@ -2475,6 +2485,7 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that ! recover the remapping answers from 2018. If false, use more ! robust forms of the same remapping expressions. + integer :: default_remap_ans_date ! The default setting for remap_answer_date integer :: remap_answer_date ! The vintage of the order of arithmetic and expressions to use ! for remapping. Values below 20190101 recover the remapping ! answers from 2018, while higher values use more robust @@ -2562,7 +2573,7 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just default=.false., do_not_log=just_read) call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -2576,7 +2587,17 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - remap_answer_date = 20190101 ; if (remap_answers_2018) remap_answer_date = 20181231 + ! Revise inconsistent default answer dates for remapping. + default_remap_ans_date = default_answer_date + if (remap_answers_2018 .and. (default_remap_ans_date >= 20190101)) default_remap_ans_date = 20181231 + if (.not.remap_answers_2018 .and. (default_remap_ans_date < 20190101)) default_remap_ans_date = 20190101 + call get_param(PF, mdl, "REMAPPING_ANSWER_DATE", remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_remap_ans_date) endif call get_param(PF, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizonal regridding that recovers "//& diff --git a/src/initialization/MOM_tracer_initialization_from_Z.F90 b/src/initialization/MOM_tracer_initialization_from_Z.F90 index 560a3ceef7..591c4db33c 100644 --- a/src/initialization/MOM_tracer_initialization_from_Z.F90 +++ b/src/initialization/MOM_tracer_initialization_from_Z.F90 @@ -83,6 +83,7 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that ! recover the remapping answers from 2018. If false, use more ! robust forms of the same remapping expressions. + integer :: default_remap_ans_date ! 
The default setting for remap_answer_date integer :: remap_answer_date ! The vintage of the order of arithmetic and expressions to use ! for remapping. Values below 20190101 recover the remapping ! answers from 2018, while higher values use more robust @@ -115,7 +116,7 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ default="PLM") call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -124,11 +125,17 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - if (remap_answers_2018) then - remap_answer_date = 20181231 - else - remap_answer_date = 20190101 - endif + ! Revise inconsistent default answer dates for remapping. + default_remap_ans_date = default_answer_date + if (remap_answers_2018 .and. (default_remap_ans_date >= 20190101)) default_remap_ans_date = 20181231 + if (.not.remap_answers_2018 .and. (default_remap_ans_date < 20190101)) default_remap_ans_date = 20190101 + call get_param(PF, mdl, "REMAPPING_ANSWER_DATE", remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_remap_ans_date) endif call get_param(PF, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizonal regridding that recovers "//& diff --git a/src/parameterizations/lateral/MOM_lateral_mixing_coeffs.F90 b/src/parameterizations/lateral/MOM_lateral_mixing_coeffs.F90 index 0dd590c2d7..dc23042916 100644 --- a/src/parameterizations/lateral/MOM_lateral_mixing_coeffs.F90 +++ b/src/parameterizations/lateral/MOM_lateral_mixing_coeffs.F90 @@ -1543,7 +1543,7 @@ subroutine VarMix_init(Time, G, GV, US, param_file, diag, CS) allocate(CS%cg1(isd:ied,jsd:jed), source=0.0) call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -1551,11 +1551,17 @@ subroutine VarMix_init(Time, G, GV, US, param_file, diag, CS) "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - if (remap_answers_2018) then - remap_answer_date = 20181231 - else - remap_answer_date = 20190101 - endif + ! Revise inconsistent default answer dates for remapping. + if (remap_answers_2018 .and. 
(default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.remap_answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) + call get_param(param_file, mdl, "INTERNAL_WAVE_SPEED_TOL", wave_speed_tol, & "The fractional tolerance for finding the wave speeds.", & units="nondim", default=0.001) diff --git a/src/parameterizations/vertical/MOM_ALE_sponge.F90 b/src/parameterizations/vertical/MOM_ALE_sponge.F90 index 59b46b61cf..8b35f3f1e1 100644 --- a/src/parameterizations/vertical/MOM_ALE_sponge.F90 +++ b/src/parameterizations/vertical/MOM_ALE_sponge.F90 @@ -180,6 +180,7 @@ subroutine initialize_ALE_sponge_fixed(Iresttime, G, GV, param_file, CS, data_h, logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that ! recover the remapping answers from 2018. If false, use more ! robust forms of the same remapping expressions. + integer :: default_remap_ans_date ! The default setting for remap_answer_date logical :: hor_regrid_answers_2018 ! If true, use the order of arithmetic for horizontal regridding ! that recovers the answers from the end of 2018. Otherwise, use ! rotationally symmetric forms of the same expressions. @@ -219,7 +220,7 @@ subroutine initialize_ALE_sponge_fixed(Iresttime, G, GV, param_file, CS, data_h, default=.false., do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -227,7 +228,17 @@ subroutine initialize_ALE_sponge_fixed(Iresttime, G, GV, param_file, CS, data_h, "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - CS%remap_answer_date = 20190101 ; if (remap_answers_2018) CS%remap_answer_date = 20181231 + ! Revise inconsistent default answer dates for remapping. + default_remap_ans_date = default_answer_date + if (remap_answers_2018 .and. (default_remap_ans_date >= 20190101)) default_remap_ans_date = 20181231 + if (.not.remap_answers_2018 .and. (default_remap_ans_date < 20190101)) default_remap_ans_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", CS%remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. 
"//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_remap_ans_date) call get_param(param_file, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizontal regridding that recovers "//& "the answers from the end of 2018. Otherwise, use rotationally symmetric "//& @@ -453,6 +464,7 @@ subroutine initialize_ALE_sponge_varying(Iresttime, G, GV, param_file, CS, Irest logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that ! recover the remapping answers from 2018. If false, use more ! robust forms of the same remapping expressions. + integer :: default_remap_ans_date ! The default setting for remap_answer_date logical :: hor_regrid_answers_2018 ! If true, use the order of arithmetic for horizontal regridding ! that recovers the answers from the end of 2018. Otherwise, use ! rotationally symmetric forms of the same expressions. @@ -486,7 +498,7 @@ subroutine initialize_ALE_sponge_varying(Iresttime, G, GV, param_file, CS, Irest default=.false., do_not_log=.true.) call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -494,7 +506,17 @@ subroutine initialize_ALE_sponge_varying(Iresttime, G, GV, param_file, CS, Irest "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - CS%remap_answer_date = 20190101 ; if (remap_answers_2018) CS%remap_answer_date = 20181231 + ! Revise inconsistent default answer dates for remapping. + default_remap_ans_date = default_answer_date + if (remap_answers_2018 .and. (default_remap_ans_date >= 20190101)) default_remap_ans_date = 20181231 + if (.not.remap_answers_2018 .and. (default_remap_ans_date < 20190101)) default_remap_ans_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", CS%remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_remap_ans_date) call get_param(param_file, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizontal regridding that recovers "//& "the answers from the end of 2018 and retain a bug in the 3-dimensional mask "//& diff --git a/src/parameterizations/vertical/MOM_tidal_mixing.F90 b/src/parameterizations/vertical/MOM_tidal_mixing.F90 index bd819a7a87..1d74b104d7 100644 --- a/src/parameterizations/vertical/MOM_tidal_mixing.F90 +++ b/src/parameterizations/vertical/MOM_tidal_mixing.F90 @@ -229,6 +229,7 @@ logical function tidal_mixing_init(Time, G, GV, US, param_file, int_tide_CSp, di logical :: remap_answers_2018 ! If true, use the order of arithmetic and expressions that ! 
recover the remapping answers from 2018. If false, use more ! robust forms of the same remapping expressions. + integer :: default_remap_ans_date ! The default setting for remap_answer_date character(len=20) :: tmpstr, int_tide_profile_str character(len=20) :: CVMix_tidal_scheme_str, tidal_energy_type character(len=200) :: filename, h2_file, Niku_TKE_input_file @@ -279,7 +280,7 @@ logical function tidal_mixing_init(Time, G, GV, US, param_file, int_tide_CSp, di call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -291,11 +292,17 @@ logical function tidal_mixing_init(Time, G, GV, US, param_file, int_tide_CSp, di "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - if (remap_answers_2018) then - CS%remap_answer_date = 20181231 - else - CS%remap_answer_date = 20190101 - endif + ! Revise inconsistent default answer dates for remapping. + default_remap_ans_date = default_answer_date + if (remap_answers_2018 .and. (default_remap_ans_date >= 20190101)) default_remap_ans_date = 20181231 + if (.not.remap_answers_2018 .and. (default_remap_ans_date < 20190101)) default_remap_ans_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", CS%remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_remap_ans_date) if (CS%int_tide_dissipation) then diff --git a/src/tracer/MOM_neutral_diffusion.F90 b/src/tracer/MOM_neutral_diffusion.F90 index 9cedfa8b57..9ef59821e3 100644 --- a/src/tracer/MOM_neutral_diffusion.F90 +++ b/src/tracer/MOM_neutral_diffusion.F90 @@ -190,7 +190,7 @@ logical function neutral_diffusion_init(Time, G, GV, US, param_file, diag, EOS, trim(remappingSchemesDoc), default=remappingDefaultScheme) call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231, do_not_log=.true.) + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) @@ -198,11 +198,16 @@ logical function neutral_diffusion_init(Time, G, GV, US, param_file, diag, EOS, "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) - if (remap_answers_2018) then - CS%remap_answer_date = 20181231 - else - CS%remap_answer_date = 20190101 - endif + ! Revise inconsistent default answer dates for remapping. + if (remap_answers_2018 .and. 
(default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.remap_answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "REMAPPING_ANSWER_DATE", CS%remap_answer_date, & + "The vintage of the expressions and order of arithmetic to use for remapping. "//& + "Values below 20190101 result in the use of older, less accurate expressions "//& + "that were in use at the end of 2018. Higher values result in the use of more "//& + "robust and accurate forms of mathematically equivalent expressions. "//& + "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) call initialize_remapping( CS%remap_CS, string, boundary_extrapolation=boundary_extrap, & answer_date=CS%remap_answer_date ) call extract_member_remapping_CS(CS%remap_CS, degree=CS%deg) From a95d1f6e9dfd355ddf250575a3fddc15bae8f45f Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Sat, 30 Jul 2022 10:39:17 -0400 Subject: [PATCH 24/40] +Add the runtime parameter HOR_REGRID_ANSWER_DATE Added the new runtime parameter HOR_REGRID_ANSWER_DATE, which takes precedence over the older parameter HOR_REGRID_2018_ANSWERS. There are 3 files with get_param calls for this new parameter. All answers are bitwise identical, but there are new entries in the MOM_parameter_doc.all files. --- .../MOM_state_initialization.F90 | 13 +++++++++- .../MOM_tracer_initialization_from_Z.F90 | 12 +++++++++- .../vertical/MOM_ALE_sponge.F90 | 24 +++++++++++++++++-- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/src/initialization/MOM_state_initialization.F90 b/src/initialization/MOM_state_initialization.F90 index ab163a55fd..3064d52035 100644 --- a/src/initialization/MOM_state_initialization.F90 +++ b/src/initialization/MOM_state_initialization.F90 @@ -2491,6 +2491,7 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just ! answers from 2018, while higher values use more robust ! forms of the same remapping expressions. logical :: hor_regrid_answers_2018 + integer :: default_hor_reg_ans_date ! The default setting for hor_regrid_answer_date integer :: hor_regrid_answer_date ! The vintage of the order of arithmetic and expressions to use ! for horizontal regridding. Values below 20190101 recover the ! answers from 2018, while higher values use expressions that have @@ -2603,7 +2604,17 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just "If true, use the order of arithmetic for horizonal regridding that recovers "//& "the answers from the end of 2018. Otherwise, use rotationally symmetric "//& "forms of the same expressions.", default=default_2018_answers) - hor_regrid_answer_date = 20190101 ; if (hor_regrid_answers_2018) hor_regrid_answer_date = 20181231 + ! Revise inconsistent default answer dates for horizontal regridding. + default_hor_reg_ans_date = default_answer_date + if (hor_regrid_answers_2018 .and. (default_hor_reg_ans_date >= 20190101)) default_hor_reg_ans_date = 20181231 + if (.not.hor_regrid_answers_2018 .and. (default_hor_reg_ans_date < 20190101)) default_hor_reg_ans_date = 20190101 + call get_param(PF, mdl, "HOR_REGRID_ANSWER_DATE", hor_regrid_answer_date, & + "The vintage of the order of arithmetic for horizontal regridding. "//& + "Dates before 20190101 give the same answers as the code did in late 2018, "//& + "while later versions add parentheses for rotational symmetry. 
"//& + "If both HOR_REGRID_2018_ANSWERS and HOR_REGRID_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_hor_reg_ans_date) + if (.not.useALEremapping) then call get_param(PF, mdl, "ADJUST_THICKNESS", correct_thickness, & "If true, all mass below the bottom removed if the "//& diff --git a/src/initialization/MOM_tracer_initialization_from_Z.F90 b/src/initialization/MOM_tracer_initialization_from_Z.F90 index 591c4db33c..04c03a5b43 100644 --- a/src/initialization/MOM_tracer_initialization_from_Z.F90 +++ b/src/initialization/MOM_tracer_initialization_from_Z.F90 @@ -89,6 +89,7 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ ! answers from 2018, while higher values use more robust ! forms of the same remapping expressions. logical :: hor_regrid_answers_2018 + integer :: default_hor_reg_ans_date ! The default setting for hor_regrid_answer_date integer :: hor_regrid_answer_date ! The vintage of the order of arithmetic and expressions to use ! for horizontal regridding. Values below 20190101 recover the ! answers from 2018, while higher values use expressions that have @@ -141,7 +142,16 @@ subroutine MOM_initialize_tracer_from_Z(h, tr, G, GV, US, PF, src_file, src_var_ "If true, use the order of arithmetic for horizonal regridding that recovers "//& "the answers from the end of 2018. Otherwise, use rotationally symmetric "//& "forms of the same expressions.", default=default_2018_answers) - hor_regrid_answer_date = 20190101 ; if (hor_regrid_answers_2018) hor_regrid_answer_date = 20181231 + ! Revise inconsistent default answer dates for horizontal regridding. + default_hor_reg_ans_date = default_answer_date + if (hor_regrid_answers_2018 .and. (default_hor_reg_ans_date >= 20190101)) default_hor_reg_ans_date = 20181231 + if (.not.hor_regrid_answers_2018 .and. (default_hor_reg_ans_date < 20190101)) default_hor_reg_ans_date = 20190101 + call get_param(PF, mdl, "HOR_REGRID_ANSWER_DATE", hor_regrid_answer_date, & + "The vintage of the order of arithmetic for horizontal regridding. "//& + "Dates before 20190101 give the same answers as the code did in late 2018, "//& + "while later versions add parentheses for rotational symmetry. "//& + "If both HOR_REGRID_2018_ANSWERS and HOR_REGRID_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_hor_reg_ans_date) ! These are model grid properties, but being applied to the data grid for now. ! need to revisit this (mjh) diff --git a/src/parameterizations/vertical/MOM_ALE_sponge.F90 b/src/parameterizations/vertical/MOM_ALE_sponge.F90 index 8b35f3f1e1..1631a76dd6 100644 --- a/src/parameterizations/vertical/MOM_ALE_sponge.F90 +++ b/src/parameterizations/vertical/MOM_ALE_sponge.F90 @@ -184,6 +184,7 @@ subroutine initialize_ALE_sponge_fixed(Iresttime, G, GV, param_file, CS, data_h, logical :: hor_regrid_answers_2018 ! If true, use the order of arithmetic for horizontal regridding ! that recovers the answers from the end of 2018. Otherwise, use ! rotationally symmetric forms of the same expressions. + integer :: default_hor_reg_ans_date ! The default setting for hor_regrid_answer_date integer :: i, j, k, col, total_sponge_cols, total_sponge_cols_u, total_sponge_cols_v if (associated(CS)) then @@ -243,7 +244,16 @@ subroutine initialize_ALE_sponge_fixed(Iresttime, G, GV, param_file, CS, data_h, "If true, use the order of arithmetic for horizontal regridding that recovers "//& "the answers from the end of 2018. 
Otherwise, use rotationally symmetric "//& "forms of the same expressions.", default=default_2018_answers) - CS%hor_regrid_answer_date = 20190101 ; if (hor_regrid_answers_2018) CS%hor_regrid_answer_date = 20181231 + ! Revise inconsistent default answer dates for horizontal regridding. + default_hor_reg_ans_date = default_answer_date + if (hor_regrid_answers_2018 .and. (default_hor_reg_ans_date >= 20190101)) default_hor_reg_ans_date = 20181231 + if (.not.hor_regrid_answers_2018 .and. (default_hor_reg_ans_date < 20190101)) default_hor_reg_ans_date = 20190101 + call get_param(param_file, mdl, "HOR_REGRID_ANSWER_DATE", CS%hor_regrid_answer_date, & + "The vintage of the order of arithmetic for horizontal regridding. "//& + "Dates before 20190101 give the same answers as the code did in late 2018, "//& + "while later versions add parentheses for rotational symmetry. "//& + "If both HOR_REGRID_2018_ANSWERS and HOR_REGRID_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_hor_reg_ans_date) call get_param(param_file, mdl, "REENTRANT_X", CS%reentrant_x, & "If true, the domain is zonally reentrant.", default=.true.) call get_param(param_file, mdl, "TRIPOLAR_N", CS%tripolar_N, & @@ -468,6 +478,7 @@ subroutine initialize_ALE_sponge_varying(Iresttime, G, GV, param_file, CS, Irest logical :: hor_regrid_answers_2018 ! If true, use the order of arithmetic for horizontal regridding ! that recovers the answers from the end of 2018. Otherwise, use ! rotationally symmetric forms of the same expressions. + integer :: default_hor_reg_ans_date ! The default setting for hor_regrid_answer_date integer :: i, j, col, total_sponge_cols, total_sponge_cols_u, total_sponge_cols_v if (associated(CS)) then @@ -523,7 +534,16 @@ subroutine initialize_ALE_sponge_varying(Iresttime, G, GV, param_file, CS, Irest "returned in certain cases. Otherwise, use rotationally symmetric "//& "forms of the same expressions and initialize the mask properly.", & default=default_2018_answers) - CS%hor_regrid_answer_date = 20190101 ; if (hor_regrid_answers_2018) CS%hor_regrid_answer_date = 20181231 + ! Revise inconsistent default answer dates for horizontal regridding. + default_hor_reg_ans_date = default_answer_date + if (hor_regrid_answers_2018 .and. (default_hor_reg_ans_date >= 20190101)) default_hor_reg_ans_date = 20181231 + if (.not.hor_regrid_answers_2018 .and. (default_hor_reg_ans_date < 20190101)) default_hor_reg_ans_date = 20190101 + call get_param(param_file, mdl, "HOR_REGRID_ANSWER_DATE", CS%hor_regrid_answer_date, & + "The vintage of the order of arithmetic for horizontal regridding. "//& + "Dates before 20190101 give the same answers as the code did in late 2018, "//& + "while later versions add parentheses for rotational symmetry. "//& + "If both HOR_REGRID_2018_ANSWERS and HOR_REGRID_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_hor_reg_ans_date) call get_param(param_file, mdl, "SPONGE_DATA_ONGRID", CS%spongeDataOngrid, & "When defined, the incoming sponge data are "//& "assumed to be on the model grid " , & From 83acd4381e49f36f66a5d1ac6f03d216153dd2c5 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Sat, 30 Jul 2022 14:25:54 -0400 Subject: [PATCH 25/40] +Added 9 ..._ANSWER_DATE runtime parameters Added 9 ..._ANSWER_DATE runtime parameters controlling the expressions and order of arithmetic in the parameterizations modules, which take precedence over their older ..._ANSWERS_2018 counterparts. 
The new runtime parameters are HOR_VISC_ANSWER_DATE, MEKE_GEOMETRIC_ANSWER_DATE, EPBL_ANSWER_DATE, OPTICS_ANSWER_DATE, REGULARIZE_LAYERS_ANSWER_DATE, SET_DIFF_ANSWER_DATE, SET_VISC_ANSWER_DATE, TIDAL_MIXING_ANSWER_DATE and VERT_FRICTION_ANSWER_DATE. All answers are bitwise identical, but there are numerous new entries in the MOM_parameter_doc.all files. --- .../lateral/MOM_hor_visc.F90 | 32 +++++++++++--- .../lateral/MOM_thickness_diffuse.F90 | 29 ++++++++++--- .../vertical/MOM_energetic_PBL.F90 | 39 ++++++++++++----- .../vertical/MOM_opacity.F90 | 38 +++++++++++----- .../vertical/MOM_regularize_layers.F90 | 34 +++++++++++---- .../vertical/MOM_set_diffusivity.F90 | 35 +++++++++++---- .../vertical/MOM_set_viscosity.F90 | 29 ++++++++++--- .../vertical/MOM_tidal_mixing.F90 | 43 +++++++++++++------ .../vertical/MOM_vert_friction.F90 | 38 ++++++++++++---- 9 files changed, 237 insertions(+), 80 deletions(-) diff --git a/src/parameterizations/lateral/MOM_hor_visc.F90 b/src/parameterizations/lateral/MOM_hor_visc.F90 index f7235998a6..4339a699e5 100644 --- a/src/parameterizations/lateral/MOM_hor_visc.F90 +++ b/src/parameterizations/lateral/MOM_hor_visc.F90 @@ -92,9 +92,10 @@ module MOM_hor_visc logical :: res_scale_MEKE !< If true, the viscosity contribution from MEKE is scaled by !! the resolution function. logical :: use_GME !< If true, use GME backscatter scheme. - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the - !! answers from the end of 2018. Otherwise, use updated and more robust - !! forms of the same expressions. + integer :: answer_date !< The vintage of the order of arithmetic and expressions in the + !! horizontal viscosity calculations. Values below 20190101 recover + !! the answers from the end of 2018, while higher values use updated + !! and more robust forms of the same expressions. real :: GME_h0 !< The strength of GME tapers quadratically to zero when the bathymetric !! depth is shallower than GME_H0 [Z ~> m] real :: GME_efficiency !< The nondimensional prefactor multiplying the GME coefficient [nondim] @@ -1549,7 +1550,7 @@ subroutine horizontal_viscosity(u, v, h, diffu, diffv, MEKE, VarMix, G, GV, US, Shear_mag_bc = sqrt(sh_xx(i,j) * sh_xx(i,j) + & 0.25*((sh_xy(I-1,J-1)*sh_xy(I-1,J-1) + sh_xy(I,J)*sh_xy(I,J)) + & (sh_xy(I-1,J)*sh_xy(I-1,J) + sh_xy(I,J-1)*sh_xy(I,J-1)))) - if (CS%answers_2018) then + if (CS%answer_date > 20190101) then FatH = (US%s_to_T*FatH)**MEKE%backscatter_Ro_pow ! f^n ! Note the hard-coded dimensional constant in the following line that can not ! be rescaled for dimensional consistency. @@ -1724,7 +1725,11 @@ subroutine hor_visc_init(Time, G, GV, US, param_file, diag, CS, ADp) logical :: split ! If true, use the split time stepping scheme. ! If false and USE_GME = True, issue a FATAL error. logical :: use_MEKE ! If true, the MEKE parameterization is in use. - logical :: default_2018_answers + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover the + ! answers from the end of 2018. Otherwise, use updated and more robust + ! forms of the same expressions. + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags character(len=64) :: inputdir, filename real :: deg2rad ! Converts degrees to radians real :: slat_fn ! 
sin(lat)**Kh_pwr_of_sine @@ -1748,13 +1753,26 @@ subroutine hor_visc_init(Time, G, GV, US, param_file, diag, CS, ADp) call log_version(param_file, mdl, version, "") ! All parameters are read in all cases to enable parameter spelling checks. + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "HOR_VISC_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "HOR_VISC_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + ! Revise inconsistent default answer dates for horizontal viscosity. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "HOR_VISC_ANSWER_DATE", CS%answer_date, & + "The vintage of the order of arithmetic and expressions in the horizontal "//& + "viscosity calculations. Values below 20190101 recover the answers from the "//& + "end of 2018, while higher values use updated and more robust forms of the "//& + "same expressions. If both HOR_VISC_2018_ANSWERS and HOR_VISC_ANSWER_DATE are "//& + "specified, the latter takes precedence.", default=default_answer_date) + call get_param(param_file, mdl, "DEBUG", CS%debug, default=.false.) call get_param(param_file, mdl, "LAPLACIAN", CS%Laplacian, & "If true, use a Laplacian horizontal viscosity.", & diff --git a/src/parameterizations/lateral/MOM_thickness_diffuse.F90 b/src/parameterizations/lateral/MOM_thickness_diffuse.F90 index a058d536d3..3cab1030da 100644 --- a/src/parameterizations/lateral/MOM_thickness_diffuse.F90 +++ b/src/parameterizations/lateral/MOM_thickness_diffuse.F90 @@ -72,9 +72,10 @@ module MOM_thickness_diffuse !! the GEOMETRIC thickness diffusion [nondim] real :: MEKE_GEOMETRIC_epsilon !< Minimum Eady growth rate for the GEOMETRIC thickness !! diffusivity [T-1 ~> s-1]. - logical :: MEKE_GEOM_answers_2018 !< If true, use expressions in the MEKE_GEOMETRIC calculation - !! that recover the answers from the original implementation. - !! Otherwise, use expressions that satisfy rotational symmetry. + integer :: MEKE_GEOM_answer_date !< The vintage of the expressions in the MEKE_GEOMETRIC + !! calculation. Values below 20190101 recover the answers from the + !! original implementation, while higher values use expressions that + !! satisfy rotational symmetry. logical :: Use_KH_in_MEKE !< If true, uses the thickness diffusivity calculated here to diffuse MEKE. logical :: GM_src_alt !< If true, use the GM energy conversion form S^2*N^2*kappa rather !! than the streamfunction for the GM source term. @@ -392,7 +393,7 @@ subroutine thickness_diffuse(h, uhtr, vhtr, tv, dt, G, GV, US, MEKE, VarMix, CDp if (allocated(MEKE%Kh)) then if (CS%MEKE_GEOMETRIC) then - if (CS%MEKE_GEOM_answers_2018) then + if (CS%MEKE_GEOM_answer_date < 20190101) then !$OMP do do j=js,je ; do I=is,ie ! This does not give bitwise rotational symmetry. 
@@ -1950,7 +1951,11 @@ subroutine thickness_diffuse_init(Time, G, GV, US, param_file, diag, CDp, CS) real :: strat_floor ! A floor for buoyancy frequency in the Ferrari et al. 2010, ! streamfunction formulation, expressed as a fraction of planetary ! rotation [nondim]. + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: MEKE_GEOM_answers_2018 ! If true, use expressions in the MEKE_GEOMETRIC calculation + ! that recover the answers from the original implementation. + ! Otherwise, use expressions that satisfy rotational symmetry. integer :: i, j CS%initialized = .true. @@ -2068,13 +2073,25 @@ subroutine thickness_diffuse_init(Time, G, GV, US, param_file, diag, CDp, CS) "The nondimensional coefficient governing the efficiency of the GEOMETRIC "//& "thickness diffusion.", units="nondim", default=0.05) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "MEKE_GEOMETRIC_2018_ANSWERS", CS%MEKE_GEOM_answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "MEKE_GEOMETRIC_2018_ANSWERS", MEKE_GEOM_answers_2018, & "If true, use expressions in the MEKE_GEOMETRIC calculation that recover the "//& "answers from the original implementation. Otherwise, use expressions that "//& "satisfy rotational symmetry.", default=default_2018_answers) + ! Revise inconsistent default answer dates for MEKE_geometric. + if (MEKE_GEOM_answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.MEKE_GEOM_answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "MEKE_GEOMETRIC_ANSWER_DATE", CS%MEKE_GEOM_answer_date, & + "The vintage of the expressions in the MEKE_GEOMETRIC calculation. "//& + "Values below 20190101 recover the answers from the original implementation, "//& + "while higher values use expressions that satisfy rotational symmetry. "//& + "If both MEKE_GEOMETRIC_2018_ANSWERS and MEKE_GEOMETRIC_ANSWER_DATE are "//& + "specified, the latter takes precedence.", default=default_answer_date) endif call get_param(param_file, mdl, "USE_KH_IN_MEKE", CS%Use_KH_in_MEKE, & diff --git a/src/parameterizations/vertical/MOM_energetic_PBL.F90 b/src/parameterizations/vertical/MOM_energetic_PBL.F90 index bb4b4a2f36..0e090b12e3 100644 --- a/src/parameterizations/vertical/MOM_energetic_PBL.F90 +++ b/src/parameterizations/vertical/MOM_energetic_PBL.F90 @@ -158,9 +158,10 @@ module MOM_energetic_PBL type(time_type), pointer :: Time=>NULL() !< A pointer to the ocean model's clock. logical :: TKE_diagnostics = .false. !< If true, diagnostics of the TKE budget are being calculated. - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the - !! answers from the end of 2018. Otherwise, use updated and more robust - !! forms of the same expressions. + integer :: answer_date !< The vintage of the order of arithmetic and expressions in the ePBL + !! calculations. Values below 20190101 recover the answers from the + !! end of 2018, while higher values use updated and more robust forms + !! 
of the same expressions. logical :: orig_PE_calc !< If true, the ePBL code uses the original form of the !! potential energy change code. Otherwise, it uses a newer version !! that can work with successive increments to the diffusivity in @@ -828,7 +829,7 @@ subroutine ePBL_column(h, u, v, T0, S0, dSV_dT, dSV_dS, TKE_forcing, B_flux, abs endif !/ Apply MStar to get mech_TKE - if ((CS%answers_2018) .and. (CS%mstar_scheme==Use_Fixed_MStar)) then + if ((CS%answer_date < 20190101) .and. (CS%mstar_scheme==Use_Fixed_MStar)) then mech_TKE = (dt*MSTAR_total*GV%Rho0) * u_star**3 else mech_TKE = MSTAR_total * (dt*GV%Rho0* u_star**3) @@ -1760,7 +1761,7 @@ subroutine find_mstar(CS, US, Buoyancy_Flux, UStar, UStar_Mean,& !/ 1. Get mstar elseif (CS%mstar_scheme == MStar_from_Ekman) then - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then ! The limit for the balance of rotation and stabilizing is f(L_Ekman,L_Obukhov) MStar_S = CS%MStar_coef*sqrt(max(0.0,Buoyancy_Flux) / UStar**2 / & (Abs_Coriolis + 1.e-10*US%T_to_s) ) @@ -1778,7 +1779,7 @@ subroutine find_mstar(CS, US, Buoyancy_Flux, UStar, UStar_Mean,& MStar = max(MStar_S, min(1.25, MStar_N)) if (CS%MStar_Cap > 0.0) MStar = min( CS%MStar_Cap,MStar ) elseif ( CS%mstar_scheme == MStar_from_RH18 ) then - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then MStar_N = CS%RH18_MStar_cn1 * ( 1.0 - 1.0 / ( 1. + CS%RH18_MStar_cn2 * & exp( CS%RH18_mstar_CN3 * BLD * Abs_Coriolis / UStar) ) ) else @@ -1791,7 +1792,7 @@ subroutine find_mstar(CS, US, Buoyancy_Flux, UStar, UStar_Mean,& endif !/ 2. Adjust mstar to account for convective turbulence - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then MStar_Conv_Red = 1. - CS%MStar_Convect_coef * (-min(0.0,Buoyancy_Flux) + 1.e-10*US%T_to_s**3*US%m_to_Z**2) / & ( (-min(0.0,Buoyancy_Flux) + 1.e-10*US%T_to_s**3*US%m_to_Z**2) + & 2.0 *MStar * UStar**3 / BLD ) @@ -1851,7 +1852,7 @@ subroutine Mstar_Langmuir(CS, US, Abs_Coriolis, Buoyancy_Flux, UStar, BLD, Langm if (CS%LT_Enhance_Form /= No_Langmuir) then ! a. Get parameters for modified LA - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then iL_Ekman = Abs_Coriolis / Ustar iL_Obukhov = Buoyancy_Flux*CS%vonkar / Ustar**3 Ekman_Obukhov_stab = abs(max(0., iL_Obukhov / (iL_Ekman + 1.e-10*US%Z_to_m))) @@ -1942,7 +1943,11 @@ subroutine energetic_PBL_init(Time, G, GV, US, param_file, diag, CS) real :: omega_frac_dflt integer :: isd, ied, jsd, jed integer :: mstar_mode, LT_enhance, wT_mode - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover the + ! answers from the end of 2018. Otherwise, use updated and more robust + ! forms of the same expressions. logical :: use_temperature, use_omega logical :: use_la_windsea isd = G%isd ; ied = G%ied ; jsd = G%jsd ; jed = G%jed @@ -1977,13 +1982,25 @@ subroutine energetic_PBL_init(Time, G, GV, US, param_file, diag, CS) "A nondimensional scaling factor controlling the inhibition "//& "of the diffusive length scale by rotation. 
Making this larger "//& "decreases the PBL diffusivity.", units="nondim", default=1.0) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "EPBL_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "EPBL_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + ! Revise inconsistent default answer dates for horizontal viscosity. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "EPBL_ANSWER_DATE", CS%answer_date, & + "The vintage of the order of arithmetic and expressions in the energetic "//& + "PBL calculations. Values below 20190101 recover the answers from the "//& + "end of 2018, while higher values use updated and more robust forms of the "//& + "same expressions. If both EPBL_2018_ANSWERS and EPBL_ANSWER_DATE are "//& + "specified, the latter takes precedence.", default=default_answer_date) call get_param(param_file, mdl, "EPBL_ORIGINAL_PE_CALC", CS%orig_PE_calc, & diff --git a/src/parameterizations/vertical/MOM_opacity.F90 b/src/parameterizations/vertical/MOM_opacity.F90 index 7f9f61a1dc..ccedb5c607 100644 --- a/src/parameterizations/vertical/MOM_opacity.F90 +++ b/src/parameterizations/vertical/MOM_opacity.F90 @@ -41,9 +41,10 @@ module MOM_opacity !! sufficiently thick layer [H degC T-1 ~> degC m s-1 or degC kg m-2 s-1]. real :: PenSW_absorb_Invlen !< The inverse of the thickness that is used to absorb the remaining !! shortwave heat flux when it drops below PEN_SW_FLUX_ABSORB [H ~> m or kg m-2]. - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the - !! answers from the end of 2018. Otherwise, use updated and more robust - !! forms of the same expressions. + integer :: answer_date !< The vintage of the order of arithmetic and expressions in the optics + !! calculations. Values below 20190101 recover the answers from the + !! end of 2018, while higher values use updated and more robust + !! forms of the same expressions. end type optics_type @@ -631,7 +632,7 @@ subroutine absorbRemainingSW(G, GV, US, h, opacity_band, nsw, optics, j, dt, H_l TKE_calc = (present(TKE) .and. present(dSV_dT)) - if (optics%answers_2018) then + if (optics%answer_date < 20190101) then g_Hconv2 = (US%L_to_Z**2*GV%g_Earth * GV%H_to_RZ) * GV%H_to_RZ else g_Hconv2 = US%L_to_Z**2*GV%g_Earth * GV%H_to_RZ**2 @@ -661,7 +662,7 @@ subroutine absorbRemainingSW(G, GV, US, h, opacity_band, nsw, optics, j, dt, H_l ! Heating at a very small rate can be absorbed by a sufficiently thick layer or several ! thin layers without further penetration. - if (optics%answers_2018) then + if (optics%answer_date < 20190101) then if (nsw*Pen_SW_bnd(n,i)*SW_trans < min_SW_heat*min(1.0, I_Habs*h(i,k)) ) SW_trans = 0.0 elseif ((nsw*Pen_SW_bnd(n,i)*SW_trans < min_SW_heat) .and. 
(h(i,k) > h_min_heat)) then if (nsw*Pen_SW_bnd(n,i) <= min_SW_heat * (I_Habs*(h(i,k) - h_min_heat))) then @@ -881,7 +882,7 @@ subroutine sumSWoverBands(G, GV, US, h, nsw, optics, j, dt, & ! Heating at a very small rate can be absorbed by a sufficiently thick layer or several ! thin layers without further penetration. - if (optics%answers_2018) then + if (optics%answer_date < 20190101) then if (nsw*Pen_SW_bnd(n,i)*SW_trans < min_SW_heat*min(1.0, I_Habs*h(i,k)) ) SW_trans = 0.0 elseif ((nsw*Pen_SW_bnd(n,i)*SW_trans < min_SW_heat) .and. (h(i,k) > h_min_heat)) then if (nsw*Pen_SW_bnd(n,i) <= min_SW_heat * (I_Habs*(h(i,k) - h_min_heat))) then @@ -958,7 +959,11 @@ subroutine opacity_init(Time, G, GV, US, param_file, diag, CS, optics) real :: PenSW_absorb_minthick ! A thickness that is used to absorb the remaining shortwave heat ! flux when that flux drops below PEN_SW_FLUX_ABSORB [H ~> m or kg m-2] real :: PenSW_minthick_dflt ! The default for PenSW_absorb_minthick [m] - logical :: default_2018_answers + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover the + ! answers from the end of 2018. Otherwise, use updated and more robust + ! forms of the same expressions. + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags integer :: isd, ied, jsd, jed, nz, n isd = G%isd ; ied = G%ied ; jsd = G%jsd ; jed = G%jed ; nz = GV%ke @@ -1056,14 +1061,27 @@ subroutine opacity_init(Time, G, GV, US, param_file, diag, CS, optics) "set_opacity: \Cannot use a single_exp opacity scheme with nbands!=1.") endif + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "OPTICS_2018_ANSWERS", optics%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "OPTICS_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated expressions for "//& "handling the absorption of small remaining shortwave fluxes.", & default=default_2018_answers) + ! Revise inconsistent default answer dates for optics. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "OPTICS_ANSWER_DATE", optics%answer_date, & + "The vintage of the order of arithmetic and expressions in the optics calculations. "//& + "Values below 20190101 recover the answers from the end of 2018, while "//& + "higher values use updated and more robust forms of the same expressions. 
"//& + "If both OPTICS_2018_ANSWERS and OPTICS_ANSWER_DATE are "//& + "specified, the latter takes precedence.", default=default_answer_date) + call get_param(param_file, mdl, "PEN_SW_FLUX_ABSORB", optics%PenSW_flux_absorb, & "A minimum remaining shortwave heating rate that will be simply absorbed in "//& @@ -1072,7 +1090,7 @@ subroutine opacity_init(Time, G, GV, US, param_file, diag, CS, optics) "or 0.08 degC m century-1, but 0 is also a valid value.", & default=2.5e-11, units="degC m s-1", scale=US%degC_to_C*GV%m_to_H*US%T_to_s) - if (optics%answers_2018) then ; PenSW_minthick_dflt = 0.001 ; else ; PenSW_minthick_dflt = 1.0 ; endif + if (optics%answer_date < 20190101) then ; PenSW_minthick_dflt = 0.001 ; else ; PenSW_minthick_dflt = 1.0 ; endif call get_param(param_file, mdl, "PEN_SW_ABSORB_MINTHICK", PenSW_absorb_minthick, & "A thickness that is used to absorb the remaining penetrating shortwave heat "//& "flux when it drops below PEN_SW_FLUX_ABSORB.", & diff --git a/src/parameterizations/vertical/MOM_regularize_layers.F90 b/src/parameterizations/vertical/MOM_regularize_layers.F90 index 3791ad26aa..deb1c90ca9 100644 --- a/src/parameterizations/vertical/MOM_regularize_layers.F90 +++ b/src/parameterizations/vertical/MOM_regularize_layers.F90 @@ -50,9 +50,10 @@ module MOM_regularize_layers type(time_type), pointer :: Time => NULL() !< A pointer to the ocean model's clock. type(diag_ctrl), pointer :: diag => NULL() !< A structure that is used to !! regulate the timing of diagnostic output. - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the - !! answers from the end of 2018. Otherwise, use updated and more robust - !! forms of the same expressions. + integer :: answer_date !< The vintage of the order of arithmetic and expressions in this module's + !! calculations. Values below 20190101 recover the answers from the + !! end of 2018, while higher values use updated and more robust forms + !! of the same expressions. logical :: debug !< If true, do more thorough checks for debugging purposes. integer :: id_def_rat = -1 !< A diagnostic ID @@ -303,7 +304,7 @@ subroutine regularize_surface(h, tv, dt, ea, eb, G, GV, US, CS) else h_add = e_2d(i,nkmb+1) - e_filt(i,nkmb+1) h_2d(i,k) = h_2d(i,k) - h_add - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then e_2d(i,nkmb+1) = e_2d(i,nkmb+1) - h_add else e_2d(i,nkmb+1) = e_filt(i,nkmb+1) @@ -709,9 +710,13 @@ subroutine regularize_layers_init(Time, G, GV, param_file, diag, CS) !! diagnostic output. type(regularize_layers_CS), intent(inout) :: CS !< Regularize layer control struct -#include "version_variable.h" +# include "version_variable.h" character(len=40) :: mdl = "MOM_regularize_layers" ! This module's name. - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover the + ! answers from the end of 2018. Otherwise, use updated and more robust + ! forms of the same expressions. logical :: just_read integer :: isd, ied, jsd, jed isd = G%isd ; ied = G%ied ; jsd = G%jsd ; jed = G%jed @@ -741,13 +746,26 @@ subroutine regularize_layers_init(Time, G, GV, param_file, diag, CS) "densities during detrainment when regularizing the near-surface layers. 
The "//& "default of 0.6 gives 20% overlaps in density", & units="nondim", default=0.6, do_not_log=just_read) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231, do_not_log=just_read) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false., do_not_log=just_read) - call get_param(param_file, mdl, "REGULARIZE_LAYERS_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101), do_not_log=just_read) + call get_param(param_file, mdl, "REGULARIZE_LAYERS_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the answers "//& "from the end of 2018. Otherwise, use updated and more robust forms of the "//& "same expressions.", default=default_2018_answers, do_not_log=just_read) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "REGULARIZE_LAYERS_ANSWER_DATE", CS%answer_date, & + "The vintage of the order of arithmetic and expressions in the regularize "//& + "layers calculations. Values below 20190101 recover the answers from the "//& + "end of 2018, while higher values use updated and more robust forms of the "//& + "same expressions. If both REGULARIZE_LAYERS_2018_ANSWERS and "//& + "REGULARIZE_LAYERS_ANSWER_DATE are specified, the latter takes precedence.", & + default=default_answer_date) endif call get_param(param_file, mdl, "HMIX_MIN", CS%Hmix_min, & diff --git a/src/parameterizations/vertical/MOM_set_diffusivity.F90 b/src/parameterizations/vertical/MOM_set_diffusivity.F90 index eff9d7ff72..2e27877350 100644 --- a/src/parameterizations/vertical/MOM_set_diffusivity.F90 +++ b/src/parameterizations/vertical/MOM_set_diffusivity.F90 @@ -151,9 +151,10 @@ module MOM_set_diffusivity real :: Max_salt_diff_salt_fingers !< max salt diffusivity for salt fingers [Z2 T-1 ~> m2 s-1] real :: Kv_molecular !< molecular visc for double diff convect [Z2 T-1 ~> m2 s-1] - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the - !! answers from the end of 2018. Otherwise, use updated and more robust - !! forms of the same expressions. + integer :: answer_date !< The vintage of the order of arithmetic and expressions in this module's + !! calculations. Values below 20190101 recover the answers from the + !! end of 2018, while higher values use updated and more robust forms + !! of the same expressions. character(len=200) :: inputdir !< The directory in which input files are found type(user_change_diff_CS), pointer :: user_change_diff_CSp => NULL() !< Control structure for a child module @@ -286,7 +287,7 @@ subroutine set_diffusivity(u, v, h, u_h, v_h, tv, fluxes, optics, visc, dt, Kd_i if (.not.CS%initialized) call MOM_error(FATAL,"set_diffusivity: "//& "Module must be initialized before it is used.") - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then ! These hard-coded dimensional parameters are being replaced. kappa_dt_fill = US%m_to_Z**2 * 1.e-3 * 7200. 
else @@ -719,7 +720,7 @@ subroutine find_TKE_to_Kd(h, tv, dRho_int, N2_lay, j, dt, G, GV, US, CS, & Omega2 = CS%omega**2 H_neglect = GV%H_subroundoff G_Rho0 = (US%L_to_Z**2 * GV%g_Earth) / (GV%Rho0) - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then I_Rho0 = 1.0 / (GV%Rho0) G_IRho0 = (US%L_to_Z**2 * GV%g_Earth) * I_Rho0 else @@ -801,7 +802,7 @@ subroutine find_TKE_to_Kd(h, tv, dRho_int, N2_lay, j, dt, G, GV, US, CS, & if (k == kb(i)) then maxEnt(i,kb(i)) = mFkb(i) elseif (k > kb(i)) then - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then maxEnt(i,k) = (1.0/dsp1_ds(i,k))*(maxEnt(i,k-1) + htot(i)) else maxEnt(i,k) = ds_dsp1(i,k)*(maxEnt(i,k-1) + htot(i)) @@ -1981,7 +1982,11 @@ subroutine set_diffusivity_init(Time, G, GV, US, param_file, diag, CS, int_tide_ ! Local variables real :: decay_length logical :: ML_use_omega - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover the + ! answers from the end of 2018. Otherwise, use updated and more robust + ! forms of the same expressions. ! This include declares and sets the variable "version". # include "version_variable.h" character(len=40) :: mdl = "MOM_set_diffusivity" ! This module's name. @@ -2029,13 +2034,25 @@ subroutine set_diffusivity_init(Time, G, GV, US, param_file, diag, CS, int_tide_ call get_param(param_file, mdl, "OMEGA", CS%omega, & "The rotation rate of the earth.", units="s-1", default=7.2921e-5, scale=US%T_to_s) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "SET_DIFF_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "SET_DIFF_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "SET_DIFF_ANSWER_DATE", CS%answer_date, & + "The vintage of the order of arithmetic and expressions in the set diffusivity "//& + "calculations. Values below 20190101 recover the answers from the end of 2018, "//& + "while higher values use updated and more robust forms of the same expressions. "//& + "If both SET_DIFF_2018_ANSWERS and SET_DIFF_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) ! CS%use_tidal_mixing is set to True if an internal tidal dissipation scheme is to be used. 
CS%use_tidal_mixing = tidal_mixing_init(Time, G, GV, US, param_file, & diff --git a/src/parameterizations/vertical/MOM_set_viscosity.F90 b/src/parameterizations/vertical/MOM_set_viscosity.F90 index 7c6d96dede..9bd995633f 100644 --- a/src/parameterizations/vertical/MOM_set_viscosity.F90 +++ b/src/parameterizations/vertical/MOM_set_viscosity.F90 @@ -93,8 +93,9 @@ module MOM_set_visc real :: omega_frac !< When setting the decay scale for turbulence, use !! this fraction of the absolute rotation rate blended !! with the local value of f, as sqrt((1-of)*f^2 + of*4*omega^2). - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the - !! answers from the end of 2018. Otherwise, use updated and more robust + integer :: answer_date !< The vintage of the order of arithmetic and expressions in the set + !! viscosity calculations. Values below 20190101 recover the answers + !! from the end of 2018, while higher values use updated and more robust !! forms of the same expressions. logical :: debug !< If true, write verbose checksums for debugging purposes. logical :: BBL_use_tidal_bg !< If true, use a tidal background amplitude for the bottom velocity @@ -867,7 +868,7 @@ subroutine set_viscous_BBL(u, v, h, tv, visc, G, GV, US, CS, pbv) use_L0 = .false. do_one_L_iter = .false. - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then curv_tol = GV%Angstrom_H*dV_dL2**2 & * (0.25 * dV_dL2 * GV%Angstrom_H - a * L0 * dVol) do_one_L_iter = (a * a * dVol**3) < curv_tol @@ -1964,7 +1965,11 @@ subroutine set_visc_init(Time, G, GV, US, param_file, diag, visc, CS, restart_CS ! representation in a restart file to the internal representation in this run. integer :: i, j, k, is, ie, js, je integer :: isd, ied, jsd, jed, IsdB, IedB, JsdB, JedB, nz - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover the + ! answers from the end of 2018. Otherwise, use updated and more robust + ! forms of the same expressions. logical :: adiabatic, use_omega, MLE_use_PBL_MLD logical :: use_KPP logical :: use_regridding ! If true, use the ALE algorithm rather than layered @@ -1990,13 +1995,25 @@ subroutine set_visc_init(Time, G, GV, US, param_file, diag, visc, CS, restart_CS CS%RiNo_mix = .false. call get_param(param_file, mdl, "INPUTDIR", CS%inputdir, default=".") CS%inputdir = slasher(CS%inputdir) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "SET_VISC_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "SET_VISC_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. 
(default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "SET_VISC_ANSWER_DATE", CS%answer_date, & + "The vintage of the order of arithmetic and expressions in the set viscosity "//& + "calculations. Values below 20190101 recover the answers from the end of 2018, "//& + "while higher values use updated and more robust forms of the same expressions. "//& + "If both SET_VISC_2018_ANSWERS and SET_VISC_ANSWER_DATE are specified, "//& + "the latter takes precedence.", default=default_answer_date) call get_param(param_file, mdl, "BOTTOMDRAGLAW", CS%bottomdraglaw, & "If true, the bottom stress is calculated with a drag "//& "law of the form c_drag*|u|*u. The velocity magnitude "//& diff --git a/src/parameterizations/vertical/MOM_tidal_mixing.F90 b/src/parameterizations/vertical/MOM_tidal_mixing.F90 index 1d74b104d7..645a6ef491 100644 --- a/src/parameterizations/vertical/MOM_tidal_mixing.F90 +++ b/src/parameterizations/vertical/MOM_tidal_mixing.F90 @@ -139,11 +139,14 @@ module MOM_tidal_mixing real :: tidal_diss_lim_tc !< CVMix-specific dissipation limit depth for !! tidal-energy-constituent data [Z ~> m]. type(remapping_CS) :: remap_CS !< The control structure for remapping - integer :: remap_answer_date = 20181231 !< The vintage of the order of arithmetic and expressions to use - !! for remapping. Values below 20190101 recover the remapping - !! answers from 2018, while higher values use more robust - !! forms of the same remapping expressions. - !### Change to 99991231? + integer :: remap_answer_date !< The vintage of the order of arithmetic and expressions to use + !! for remapping. Values below 20190101 recover the remapping + !! answers from 2018, while higher values use more robust + !! forms of the same remapping expressions. + integer :: tidal_answer_date !< The vintage of the order of arithmetic and expressions in the tidal + !! mixing calculations. Values below 20190101 recover the answers + !! from the end of 2018, while higher values use updated and more robust + !! forms of the same expressions. type(int_tide_CS), pointer :: int_tide_CSp=> NULL() !< Control structure for a child module @@ -163,9 +166,6 @@ module MOM_tidal_mixing !! TODO: make this E(x,y) only real, allocatable :: tidal_qe_3d_in(:,:,:) !< q*E(x,y,z) with the Schmittner parameterization [W m-3?] - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the - !! answers from the end of 2018. Otherwise, use updated and more robust - !! forms of the same expressions. ! Diagnostics type(diag_ctrl), pointer :: diag => NULL() !< structure to regulate diagnostic output timing @@ -230,6 +230,10 @@ logical function tidal_mixing_init(Time, G, GV, US, param_file, int_tide_CSp, di ! recover the remapping answers from 2018. If false, use more ! robust forms of the same remapping expressions. integer :: default_remap_ans_date ! The default setting for remap_answer_date + integer :: default_tide_ans_date ! The default setting for tides_answer_date + logical :: tide_answers_2018 ! If true, use the order of arithmetic and expressions that recover the + ! answers from the end of 2018. Otherwise, use updated and more robust + ! forms of the same expressions. 
character(len=20) :: tmpstr, int_tide_profile_str character(len=20) :: CVMix_tidal_scheme_str, tidal_energy_type character(len=200) :: filename, h2_file, Niku_TKE_input_file @@ -284,10 +288,21 @@ logical function tidal_mixing_init(Time, G, GV, US, param_file, int_tide_CSp, di call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & default=(default_answer_date<20190101)) - call get_param(param_file, mdl, "TIDAL_MIXING_2018_ANSWERS", CS%answers_2018, & + call get_param(param_file, mdl, "TIDAL_MIXING_2018_ANSWERS", tide_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + ! Revise inconsistent default answer dates for the tidal mixing. + default_tide_ans_date = default_answer_date + if (tide_answers_2018 .and. (default_tide_ans_date >= 20190101)) default_tide_ans_date = 20181231 + if (.not.tide_answers_2018 .and. (default_tide_ans_date < 20190101)) default_tide_ans_date = 20190101 + call get_param(param_file, mdl, "TIDAL_MIXING_ANSWER_DATE", CS%tidal_answer_date, & + "The vintage of the order of arithmetic and expressions in the tidal mixing "//& + "calculations. Values below 20190101 recover the answers from the end of 2018, "//& + "while higher values use updated and more robust forms of the same expressions. "//& + "If both TIDAL_MIXING_2018_ANSWERS and TIDAL_MIXING_ANSWER_DATE are specified, "//& + "the latter takes precedence.", default=default_tide_ans_date) + call get_param(param_file, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& @@ -502,7 +517,7 @@ logical function tidal_mixing_init(Time, G, GV, US, param_file, int_tide_CSp, di CS%tideamp(i,j) = CS%tideamp(i,j) * CS%mask_itidal(i,j) * G%mask2dT(i,j) ! Restrict rms topo to a fraction (often 10 percent) of the column depth. - if (CS%answers_2018 .and. (max_frac_rough >= 0.0)) then + if ((CS%tidal_answer_date < 20190101) .and. (max_frac_rough >= 0.0)) then hamp = min(max_frac_rough*(G%bathyT(i,j)+G%Z_ref), sqrt(CS%h2(i,j))) CS%h2(i,j) = hamp*hamp else @@ -1121,7 +1136,7 @@ subroutine add_int_tide_diffusivity(h, j, N2_bot, N2_lay, TKE_to_Kd, max_TKE, & do i=is,ie CS%Nb(i,j) = sqrt(N2_bot(i)) - if (CS%answers_2018) then + if (CS%tidal_answer_date < 20190101) then if ((CS%tideamp(i,j) > 0.0) .and. & (CS%kappa_itides**2 * CS%h2(i,j) * CS%Nb(i,j)**3 > 1.0e-14*US%T_to_s**3) ) then z0_polzin(i) = CS%Polzin_decay_scale_factor * CS%Nu_Polzin * & @@ -1166,7 +1181,7 @@ subroutine add_int_tide_diffusivity(h, j, N2_bot, N2_lay, TKE_to_Kd, max_TKE, & if (allocated(CS%dd%N2_bot)) & CS%dd%N2_bot(i,j) = CS%Nb(i,j)*CS%Nb(i,j) - if (CS%answers_2018) then + if (CS%tidal_answer_date < 20190101) then ! These expressions use dimensional constants to avoid NaN values. if ( CS%Int_tide_dissipation .and. (CS%int_tide_profile == POLZIN_09) ) then if (htot_WKB(i) > 1.0e-14*US%m_to_Z) & @@ -1199,7 +1214,7 @@ subroutine add_int_tide_diffusivity(h, j, N2_bot, N2_lay, TKE_to_Kd, max_TKE, & z_from_bot(i) = GV%H_to_Z*h(i,j,nz) ! Use the new formulation for WKB scaling. N2 is referenced to its vertical mean. 
- if (CS%answers_2018) then + if (CS%tidal_answer_date < 20190101) then if (N2_meanz(i) > 1.0e-14*US%T_to_s**2 ) then z_from_bot_WKB(i) = GV%H_to_Z*h(i,j,nz) * N2_lay(i,nz) / N2_meanz(i) else ; z_from_bot_WKB(i) = 0 ; endif @@ -1331,7 +1346,7 @@ subroutine add_int_tide_diffusivity(h, j, N2_bot, N2_lay, TKE_to_Kd, max_TKE, & do k=nz-1,2,-1 ; do i=is,ie if (max_TKE(i,k) <= 0.0) cycle z_from_bot(i) = z_from_bot(i) + GV%H_to_Z*h(i,j,k) - if (CS%answers_2018) then + if (CS%tidal_answer_date < 20190101) then if (N2_meanz(i) > 1.0e-14*US%T_to_s**2 ) then z_from_bot_WKB(i) = z_from_bot_WKB(i) & + GV%H_to_Z * h(i,j,k) * N2_lay(i,k) / N2_meanz(i) diff --git a/src/parameterizations/vertical/MOM_vert_friction.F90 b/src/parameterizations/vertical/MOM_vert_friction.F90 index 855d563efc..21ae10fef2 100644 --- a/src/parameterizations/vertical/MOM_vert_friction.F90 +++ b/src/parameterizations/vertical/MOM_vert_friction.F90 @@ -100,9 +100,10 @@ module MOM_vert_friction !! calculation, perhaps based on a bulk Richardson !! number criterion, to determine the mixed layer !! thickness for viscosity. - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the - !! answers from the end of 2018. Otherwise, use expressions that do not - !! use an arbitrary and hard-coded maximum viscous coupling coefficient + integer :: answer_date !< The vintage of the order of arithmetic and expressions in the viscous + !! calculations. Values below 20190101 recover the answers from the end + !! of 2018, while higher values use expressions that do not use an + !! arbitrary and hard-coded maximum viscous coupling coefficient !! between layers. logical :: debug !< If true, write verbose checksums for debugging purposes. integer :: nkml !< The number of layers in the mixed layer. @@ -1192,7 +1193,7 @@ subroutine find_coupling_coef(a_cpl, hvel, do_i, h_harm, bbl_thick, kv_bbl, z_i, nz = GV%ke h_neglect = GV%H_subroundoff - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then ! The maximum coupling coefficient was originally introduced to avoid ! truncation error problems in the tridiagonal solver. Effectively, the 1e-10 ! sets the maximum coupling coefficient increment to 1e10 m per timestep. @@ -1626,10 +1627,15 @@ subroutine vertvisc_init(MIS, Time, G, GV, US, param_file, diag, ADp, dirs, & real :: Kv_dflt ! A default viscosity [m2 s-1]. real :: Hmix_m ! A boundary layer thickness [m]. - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover the + !! answers from the end of 2018. Otherwise, use expressions that do not + !! use an arbitrary and hard-coded maximum viscous coupling coefficient + !! between layers. integer :: isd, ied, jsd, jed, IsdB, IedB, JsdB, JedB, nz -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "MOM_vert_friction" ! This module's name. character(len=40) :: thickness_units @@ -1652,14 +1658,28 @@ subroutine vertvisc_init(MIS, Time, G, GV, US, param_file, diag, ADp, dirs, & ! Default, read and log parameters call log_version(param_file, mdl, version, "", log_to_all=.true., debugging=.true.) 
+ call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "VERT_FRICTION_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "VERT_FRICTION_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the answers "//& "from the end of 2018. Otherwise, use expressions that do not use an arbitrary "//& "hard-coded maximum viscous coupling coefficient between layers.", & default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "VERT_FRICTION_ANSWER_DATE", CS%answer_date, & + "The vintage of the order of arithmetic and expressions in the viscous "//& + "calculations. Values below 20190101 recover the answers from the end of 2018, "//& + "while higher values use expressions that do not use an arbitrary hard-coded "//& + "maximum viscous coupling coefficient between layers. "//& + "If both VERT_FRICTION_2018_ANSWERS and VERT_FRICTION_ANSWER_DATE are "//& + "specified, the latter takes precedence.", default=default_answer_date) + call get_param(param_file, mdl, "BOTTOMDRAGLAW", CS%bottomdraglaw, & "If true, the bottom stress is calculated with a drag "//& "law of the form c_drag*|u|*u. The velocity magnitude "//& From 16c3126d2b5c3ac351cfdc50d404a951f8b8b787 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Sun, 31 Jul 2022 08:42:59 -0400 Subject: [PATCH 26/40] +Added 6 more ..._ANSWER_DATE runtime parameters Added 6 ..._ANSWER_DATE runtime parameters controlling the expressions and order of arithmetic in the core, ocean_data_assim, user, and driver modules, which take precedence over their older ..._ANSWERS_2018 counterparts. The new runtime parameters are SURFACE_ANSWER_DATE, BAROTROPIC_ANSWER_DATE, ODA_ANSWER_DATE, IDL_HURR_ANSWER_DATE SURFACE_FORCING_ANSWER_DATE and WIND_GYRES_ANSWER_DATE. All answers are bitwise identical, but there are numerous new entries in the MOM_parameter_doc.all files. 
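Each of the new parameters follows the same pattern: read DEFAULT_ANSWER_DATE, read the legacy ..._2018_ANSWERS flag with a default inferred from that date, revise the default date if the two are inconsistent, and only then read the module's ..._ANSWER_DATE with the revised default, so that an explicitly specified date takes precedence over the older flag. The short standalone program below is a minimal sketch of that logic; the names and values are illustrative stand-ins, not MOM6 code or its get_param interface.

program answer_date_precedence
  implicit none
  ! Stand-ins for the runtime parameters; in MOM6 these are read with get_param.
  integer :: default_answer_date  ! e.g. DEFAULT_ANSWER_DATE
  logical :: answers_2018         ! e.g. SURFACE_2018_ANSWERS (legacy flag)
  integer :: answer_date          ! e.g. SURFACE_ANSWER_DATE (new parameter)

  default_answer_date = 99991231  ! the overall default
  answers_2018 = .true.           ! an older input file still sets the 2018 flag

  ! Revise an inconsistent default so the legacy flag still selects the old answers.
  if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231
  if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101

  ! The module's ..._ANSWER_DATE parameter is then read with this revised default, so
  ! an explicit ..._ANSWER_DATE entry in the input file overrides the 2018 flag.
  answer_date = default_answer_date

  if (answer_date < 20190101) then
    write(*,*) "answer_date =", answer_date, ": using the end-of-2018 expressions"
  else
    write(*,*) "answer_date =", answer_date, ": using the updated expressions"
  endif
end program answer_date_precedence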
--- .../FMS_cap/MOM_surface_forcing_gfdl.F90 | 40 ++++++++++++----- .../solo_driver/MOM_surface_forcing.F90 | 37 ++++++++++++---- src/core/MOM.F90 | 36 +++++++++++---- src/core/MOM_barotropic.F90 | 32 ++++++++++---- src/ocean_data_assim/MOM_oda_driver.F90 | 36 ++++++++++----- src/user/Idealized_Hurricane.F90 | 44 +++++++++++++------ 6 files changed, 165 insertions(+), 60 deletions(-) diff --git a/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 index edd2517adc..90797027c6 100644 --- a/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 +++ b/config_src/drivers/FMS_cap/MOM_surface_forcing_gfdl.F90 @@ -129,9 +129,10 @@ module MOM_surface_forcing_gfdl real :: max_delta_srestore !< Maximum delta salinity used for restoring [S ~> ppt] real :: max_delta_trestore !< Maximum delta sst used for restoring [C ~> degC] real, pointer, dimension(:,:) :: basin_mask => NULL() !< Mask for surface salinity restoring by basin - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover - !! the answers from the end of 2018. Otherwise, use a simpler - !! expression to calculate gustiness. + integer :: answer_date !< The vintage of the order of arithmetic and expressions in the + !! gustiness calculations. Values below 20190101 recover the answers + !! from the end of 2018, while higher values use a simpler expression + !! to calculate gustiness. logical :: fix_ustar_gustless_bug !< If true correct a bug in the time-averaging of the !! gustless wind friction velocity. logical :: check_no_land_fluxes !< Return warning if IOB flux over land is non-zero @@ -533,7 +534,7 @@ subroutine convert_IOB_to_fluxes(IOB, fluxes, index_bounds, Time, valid_time, G, if (CS%check_no_land_fluxes) & call check_mask_val_consistency(IOB%sw_flux_nir_dif(i-i0,j-j0), G%mask2dT(i,j), i, j, 'sw_flux_nir_dif', G) endif - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then fluxes%sw(i,j) = fluxes%sw_vis_dir(i,j) + fluxes%sw_vis_dif(i,j) + & fluxes%sw_nir_dir(i,j) + fluxes%sw_nir_dif(i,j) else @@ -1038,7 +1039,7 @@ subroutine extract_IOB_stresses(IOB, index_bounds, Time, G, US, CS, taux, tauy, endif ustar(i,j) = sqrt(gustiness*IRho0 + IRho0*Pa_conversion*IOB%stress_mag(i-i0,j-j0)) enddo ; enddo ; endif - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then if (do_gustless) then ; do j=js,je ; do i=is,ie gustless_ustar(i,j) = sqrt(Pa_conversion*US%L_to_Z*IOB%stress_mag(i-i0,j-j0) / CS%Rho0) enddo ; enddo ; endif @@ -1060,7 +1061,7 @@ subroutine extract_IOB_stresses(IOB, index_bounds, Time, G, US, CS, taux, tauy, if (CS%read_gust_2d) gustiness = CS%gust(i,j) endif if (do_ustar) ustar(i,j) = sqrt(gustiness*IRho0 + IRho0 * tau_mag) - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then if (do_gustless) gustless_ustar(i,j) = sqrt(US%L_to_Z*tau_mag / CS%Rho0) else if (do_gustless) gustless_ustar(i,j) = sqrt(IRho0 * tau_mag) @@ -1072,7 +1073,7 @@ subroutine extract_IOB_stresses(IOB, index_bounds, Time, G, US, CS, taux, tauy, gustiness = CS%gust_const if (CS%read_gust_2d .and. 
(G%mask2dT(i,j) > 0.0)) gustiness = CS%gust(i,j) if (do_ustar) ustar(i,j) = sqrt(gustiness*IRho0 + IRho0 * tau_mag) - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then if (do_gustless) gustless_ustar(i,j) = sqrt(US%L_to_Z*tau_mag / CS%Rho0) else if (do_gustless) gustless_ustar(i,j) = sqrt(IRho0 * tau_mag) @@ -1093,7 +1094,7 @@ subroutine extract_IOB_stresses(IOB, index_bounds, Time, G, US, CS, taux, tauy, if (CS%read_gust_2d) gustiness = CS%gust(i,j) if (do_ustar) ustar(i,j) = sqrt(gustiness*IRho0 + IRho0 * tau_mag) - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then if (do_gustless) gustless_ustar(i,j) = sqrt(US%L_to_Z*tau_mag / CS%Rho0) else if (do_gustless) gustless_ustar(i,j) = sqrt(IRho0 * tau_mag) @@ -1250,7 +1251,11 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) real :: utide ! The RMS tidal velocity [Z T-1 ~> m s-1]. type(directories) :: dirs logical :: new_sim, iceberg_flux_diags - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover + ! the answers from the end of 2018. Otherwise, use a simpler + ! expression to calculate gustiness. type(time_type) :: Time_frc character(len=200) :: TideAmp_file, gust_file, salt_file, temp_file ! Input file names. ! This include declares and sets the variable "version". @@ -1531,13 +1536,26 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, wind_stagger) call MOM_read_data(gust_file, 'gustiness', CS%gust, G%domain, timelevel=1, & scale=US%kg_m3_to_R*US%m_s_to_L_T**2*US%L_to_Z) ! units in file should be Pa endif + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "SURFACE_FORCING_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "SURFACE_FORCING_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the answers "//& "from the end of 2018. Otherwise, use a simpler expression to calculate gustiness.", & default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "SURFACE_FORCING_ANSWER_DATE", CS%answer_date, & + "The vintage of the order of arithmetic and expressions in the gustiness "//& + "calculations. Values below 20190101 recover the answers from the end "//& + "of 2018, while higher values use a simpler expression to calculate gustiness. "//& + "If both SURFACE_FORCING_2018_ANSWERS and SURFACE_FORCING_ANSWER_DATE are "//& + "specified, the latter takes precedence.", default=default_answer_date) + call get_param(param_file, mdl, "FIX_USTAR_GUSTLESS_BUG", CS%fix_ustar_gustless_bug, & "If true correct a bug in the time-averaging of the gustless wind friction velocity", & default=.true.) 
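In the hunks above, the pre-2019 branch computes, for example, gustless_ustar = sqrt(US%L_to_Z*tau_mag / CS%Rho0), while the newer branch uses sqrt(IRho0 * tau_mag); the two forms are algebraically equivalent but need not agree to the last bit, which is presumably why both are retained behind the answer-date switch. The standalone program below is a minimal illustration of that roundoff sensitivity, using hypothetical stress and density values rather than anything from the model.

program roundoff_check
  implicit none
  ! Hypothetical values: compare sqrt(tau/rho) with sqrt(i_rho*tau), where i_rho = 1/rho
  ! is precomputed, in the spirit of the refactored gustless ustar expressions.
  integer :: n
  double precision :: tau, rho, i_rho, diff, maxdiff
  rho = 1035.0d0
  i_rho = 1.0d0 / rho
  maxdiff = 0.0d0
  do n = 1, 1000
    tau = 1.0d-3 * n
    diff = abs(sqrt(i_rho*tau) - sqrt(tau/rho))
    if (diff > maxdiff) maxdiff = diff
  enddo
  write(*,*) "largest difference over 1000 stress values:", maxdiff
end program roundoff_check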
diff --git a/config_src/drivers/solo_driver/MOM_surface_forcing.F90 b/config_src/drivers/solo_driver/MOM_surface_forcing.F90 index 58865888ca..10b5f377fa 100644 --- a/config_src/drivers/solo_driver/MOM_surface_forcing.F90 +++ b/config_src/drivers/solo_driver/MOM_surface_forcing.F90 @@ -105,10 +105,11 @@ module MOM_surface_forcing real :: gyres_taux_sin_amp !< The amplitude of cosine wind stress gyres [R L Z T-1 ~> Pa], if WIND_CONFIG=='gyres' real :: gyres_taux_cos_amp !< The amplitude of cosine wind stress gyres [R L Z T-1 ~> Pa], if WIND_CONFIG=='gyres' real :: gyres_taux_n_pis !< The number of sine lobes in the basin if WIND_CONFIG=='gyres' - logical :: answers_2018 !< If true, use the order of arithmetic and expressions that recover - !! the answers from the end of 2018. Otherwise, use a form of the gyre - !! wind stresses that are rotationally invariant and more likely to be - !! the same between compilers. + integer :: answer_date !< This 8-digit integer gives the approximate date with which the order + !! of arithmetic and and expressions were added to the code. + !! Dates before 20190101 use original answers. + !! Dates after 20190101 use a form of the gyre wind stresses that are + !! rotationally invariant and more likely to be the same between compilers. logical :: fix_ustar_gustless_bug !< If true correct a bug in the time-averaging of the !! gustless wind friction velocity. ! if WIND_CONFIG=='scurves' then use the following to define a piecewise scurve profile @@ -522,7 +523,7 @@ subroutine wind_forcing_gyres(sfc_state, forces, day, G, US, CS) enddo ; enddo ! set the friction velocity - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then do j=js,je ; do i=is,ie forces%ustar(i,j) = sqrt(US%L_to_Z * ((CS%gust_const/CS%Rho0) + & sqrt(0.5*(forces%tauy(i,j-1)*forces%tauy(i,j-1) + forces%tauy(i,j)*forces%tauy(i,j) + & @@ -1504,7 +1505,12 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, tracer_flow_C real :: flux_const_default ! The unscaled value of FLUXCONST [m day-1] real :: Pa_to_RLZ_T2 ! A unit conversion factor from Pa to the internal units ! for wind stresses [R Z L T-2 Pa-1 ~> 1] - logical :: default_2018_answers + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover + ! the answers from the end of 2018. Otherwise, use a form of the gyre + ! wind stresses that are rotationally invariant and more likely to be + ! the same between compilers. character(len=40) :: mdl = "MOM_surface_forcing" ! This module's name. character(len=200) :: filename, gust_file ! The name of the gustiness input file. @@ -1736,16 +1742,29 @@ subroutine surface_forcing_init(Time, G, US, param_file, diag, CS, tracer_flow_C "the zonal wind stress profile: "//& " n in taux = A + B*sin(n*pi*y/L) + C*cos(n*pi*y/L).", & units="nondim", default=0.0) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) 
- call get_param(param_file, mdl, "WIND_GYRES_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "WIND_GYRES_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the answers "//& "from the end of 2018. Otherwise, use expressions for the gyre friction velocities "//& "that are rotationally invariant and more likely to be the same between compilers.", & default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "WIND_GYRES_ANSWER_DATE", CS%answer_date, & + "The vintage of the expressions used to set gyre wind stresses. "//& + "Values below 20190101 recover the answers from the end of 2018, "//& + "while higher values use a form of the gyre wind stresses that are "//& + "rotationally invariant and more likely to be the same between compilers. "//& + "If both WIND_GYRES_2018_ANSWERS and WIND_GYRES_ANSWER_DATE are specified, "//& + "the latter takes precedence.", default=default_answer_date) else - CS%answers_2018 = .false. + CS%answer_date = 20190101 endif if (trim(CS%wind_config) == "scurves") then call get_param(param_file, mdl, "WIND_SCURVES_LATS", CS%scurves_ydata, & diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index e8e95ea560..8fc6600b69 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -333,9 +333,10 @@ module MOM real :: bad_val_sst_min !< Minimum SST before triggering bad value message [C ~> degC] real :: bad_val_sss_max !< Maximum SSS before triggering bad value message [S ~> ppt] real :: bad_val_col_thick !< Minimum column thickness before triggering bad value message [Z ~> m] - logical :: answers_2018 !< If true, use expressions for the surface properties that recover - !! the answers from the end of 2018. Otherwise, use more appropriate - !! expressions that differ at roundoff for non-Boussinesq cases. + integer :: answer_date !< The vintage of the expressions for the surface properties. Values + !! below 20190101 recover the answers from the end of 2018, while + !! higher values use more appropriate expressions that differ at + !! roundoff for non-Boussinesq cases. logical :: use_particles !< Turns on the particles package character(len=10) :: particle_type !< Particle types include: surface(default), profiling and sail drone. @@ -1823,7 +1824,11 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & ! with accumulated heat deficit returned to surface ocean. logical :: bound_salinity ! If true, salt is added to keep salinity above ! a minimum value, and the deficit is reported. + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use expressions for the surface properties that recover + ! the answers from the end of 2018. Otherwise, use more appropriate + ! expressions that differ at roundoff for non-Boussinesq cases. logical :: use_conT_absS ! If true, the prognostics T & S are conservative temperature ! and absolute salinity. Care should be taken to convert them ! 
to potential temperature and practical salinity before @@ -2147,13 +2152,26 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & "triggered, if CHECK_BAD_SURFACE_VALS is true.", & units="m", default=0.0, scale=US%m_to_Z) endif + call get_param(param_file, "MOM", "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, "MOM", "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, "MOM", "SURFACE_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, "MOM", "SURFACE_2018_ANSWERS", answers_2018, & "If true, use expressions for the surface properties that recover the answers "//& "from the end of 2018. Otherwise, use more appropriate expressions that differ "//& "at roundoff for non-Boussinesq cases.", default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, "MOM", "SURFACE_ANSWER_DATE", CS%answer_date, & + "The vintage of the expressions for the surface properties. Values below "//& + "20190101 recover the answers from the end of 2018, while higher values "//& + "use updated and more robust forms of the same expressions. "//& + "If both SURFACE_2018_ANSWERS and SURFACE_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) + call get_param(param_file, "MOM", "USE_DIABATIC_TIME_BUG", CS%use_diabatic_time_bug, & "If true, uses the wrong calendar time for diabatic processes, as was "//& "done in MOM6 versions prior to February 2018. This is not recommended.", & @@ -3343,9 +3361,9 @@ subroutine extract_surface_state(CS, sfc_state_in) enddo ; enddo else ! (CS%Hmix >= 0.0) - H_rescale = 1.0 ; if (CS%answers_2018) H_rescale = GV%H_to_Z + H_rescale = 1.0 ; if (CS%answer_date < 20190101) H_rescale = GV%H_to_Z depth_ml = CS%Hmix - if (.not.CS%answers_2018) depth_ml = CS%Hmix*GV%Z_to_H + if (CS%answer_date >= 20190101) depth_ml = CS%Hmix*GV%Z_to_H ! Determine the mean tracer properties of the uppermost depth_ml fluid. !$OMP parallel do default(shared) private(depth,dh) @@ -3377,7 +3395,7 @@ subroutine extract_surface_state(CS, sfc_state_in) enddo ; enddo ! Calculate the average properties of the mixed layer depth. do i=is,ie - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then if (depth(i) < GV%H_subroundoff*H_rescale) & depth(i) = GV%H_subroundoff*H_rescale if (use_temperature) then @@ -3416,7 +3434,7 @@ subroutine extract_surface_state(CS, sfc_state_in) ! This assumes that u and v halos have already been updated. if (CS%Hmix_UV>0.) then depth_ml = CS%Hmix_UV - if (.not.CS%answers_2018) depth_ml = CS%Hmix_UV*GV%Z_to_H + if (CS%answer_date >= 20190101) depth_ml = CS%Hmix_UV*GV%Z_to_H !$OMP parallel do default(shared) private(depth,dh,hv) do J=js-1,ie do i=is,ie diff --git a/src/core/MOM_barotropic.F90 b/src/core/MOM_barotropic.F90 index 6c13fa8af0..5a02f64240 100644 --- a/src/core/MOM_barotropic.F90 +++ b/src/core/MOM_barotropic.F90 @@ -210,9 +210,9 @@ module MOM_barotropic !! the barotropic acclerations. Otherwise use the depth based on bathyT. 
real :: BT_Coriolis_scale !< A factor by which the barotropic Coriolis acceleration anomaly !! terms are scaled [nondim]. - logical :: answers_2018 !< If true, use expressions for the barotropic solver that recover - !! the answers from the end of 2018. Otherwise, use more efficient - !! or general expressions. + integer :: answer_date !< The vintage of the expressions in the barotropic solver. + !! Values below 20190101 recover the answers from the end of 2018, + !! while higher values use more efficient or general expressions. logical :: dynamic_psurf !< If true, add a dynamic pressure due to a viscous !! ice shelf, for instance. @@ -1724,7 +1724,7 @@ subroutine btstep(U_in, V_in, eta_in, dt, bc_accel_u, bc_accel_v, forces, pbce, I_sum_wt_eta = 1.0 / sum_wt_eta ; I_sum_wt_trans = 1.0 / sum_wt_trans do n=1,nstep+nfilter wt_vel(n) = wt_vel(n) * I_sum_wt_vel - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then wt_accel2(n) = wt_accel(n) ! wt_trans(n) = wt_trans(n) * I_sum_wt_trans else @@ -2394,7 +2394,7 @@ subroutine btstep(U_in, V_in, eta_in, dt, bc_accel_u, bc_accel_v, forces, pbce, ! Reset the time information in the diag type. if (do_hifreq_output) call enable_averaging(time_int_in, time_end_in, CS%diag) - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then I_sum_wt_vel = 1.0 / sum_wt_vel ; I_sum_wt_eta = 1.0 / sum_wt_eta I_sum_wt_accel = 1.0 / sum_wt_accel ; I_sum_wt_trans = 1.0 / sum_wt_trans else @@ -2462,7 +2462,7 @@ subroutine btstep(U_in, V_in, eta_in, dt, bc_accel_u, bc_accel_v, forces, pbce, if (id_clock_pass_post > 0) call cpu_clock_end(id_clock_pass_post) if (id_clock_calc_post > 0) call cpu_clock_begin(id_clock_calc_post) - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then do j=js,je ; do I=is-1,ie CS%ubtav(I,j) = ubt_sum(I,j) * I_sum_wt_trans uhbtav(I,j) = uhbt_sum(I,j) * I_sum_wt_trans @@ -4299,7 +4299,11 @@ subroutine barotropic_init(u, v, h, eta, Time, G, GV, US, param_file, diag, CS, type(memory_size_type) :: MS type(group_pass_type) :: pass_static_data, pass_q_D_Cor type(group_pass_type) :: pass_bt_hbt_btav, pass_a_polarity + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use expressions for the barotropic solver that recover + ! the answers from the end of 2018. Otherwise, use more efficient + ! or general expressions. logical :: use_BT_cont_type character(len=48) :: thickness_units, flux_units character*(40) :: hvel_str @@ -4439,13 +4443,25 @@ subroutine barotropic_init(u, v, h, eta, Time, G, GV, US, param_file, diag, CS, call get_param(param_file, mdl, "BT_CORIOLIS_SCALE", CS%BT_Coriolis_scale, & "A factor by which the barotropic Coriolis anomaly terms are scaled.", & units="nondim", default=1.0) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "BAROTROPIC_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "BAROTROPIC_2018_ANSWERS", answers_2018, & "If true, use expressions for the barotropic solver that recover the answers "//& "from the end of 2018. 
Otherwise, use more efficient or general expressions.", & default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "BAROTROPIC_ANSWER_DATE", CS%answer_date, & + "The vintage of the expressions in the barotropic solver. "//& + "Values below 20190101 recover the answers from the end of 2018, "//& + "while higher values uuse more efficient or general expressions. "//& + "If both BAROTROPIC_2018_ANSWERS and BAROTROPIC_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) call get_param(param_file, mdl, "TIDES", CS%tides, & "If true, apply tidal momentum forcing.", default=.false.) diff --git a/src/ocean_data_assim/MOM_oda_driver.F90 b/src/ocean_data_assim/MOM_oda_driver.F90 index 43a8416a10..fd49ec5a98 100644 --- a/src/ocean_data_assim/MOM_oda_driver.F90 +++ b/src/ocean_data_assim/MOM_oda_driver.F90 @@ -134,9 +134,10 @@ module MOM_oda_driver_mod type(INC_CS) :: INC_CS !< A Structure containing integer file handles for bias adjustment integer :: id_inc_t !< A diagnostic handle for the temperature climatological adjustment integer :: id_inc_s !< A diagnostic handle for the salinity climatological adjustment - logical :: answers_2018 !< If true, use the order of arithmetic and expressions for remapping - !! that recover the answers from the end of 2018. Otherwise, use more - !! robust and accurate forms of mathematically equivalent expressions. + integer :: answer_date !< The vintage of the order of arithmetic and expressions in the + !! remapping invoked by the ODA driver. Values below 20190101 recover + !! the answers from the end of 2018, while higher values use updated + !! and more robust forms of the same expressions. end type ODA_CS @@ -175,7 +176,11 @@ subroutine init_oda(Time, G, GV, US, diag_CS, CS) character(len=200) :: inputdir, basin_file character(len=80) :: remap_scheme character(len=80) :: bias_correction_file, inc_file - logical :: default_2018_answers + logical :: answers_2018 ! If true, use the order of arithmetic and expressions that recover the + ! answers from the end of 2018. Otherwise, use updated and more robust + ! forms of the same expressions. + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. + logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. if (associated(CS)) call MOM_error(FATAL, 'Calling oda_init with associated control structure') allocate(CS) @@ -232,14 +237,25 @@ subroutine init_oda(Time, G, GV, US, diag_CS, CS) "for vertical remapping for all variables. "//& "It can be one of the following schemes: "//& trim(remappingSchemesDoc), default="PPM_H4") + call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false., do_not_log=.true.) - call get_param(PF, mdl, "ODA_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(PF, mdl, "ODA_2018_ANSWERS", answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from original version of the ODA driver. 
Otherwise, use updated and "//& - "more robust forms of the same expressions.", default=default_2018_answers, & - do_not_log=.true.) + "more robust forms of the same expressions.", default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(PF, mdl, "ODA_ANSWER_DATE", CS%answer_date, & + "The vintage of the order of arithmetic and expressions used by the ODA driver "//& + "Values below 20190101 recover the answers from the end of 2018, while higher "//& + "values use updated and more robust forms of the same expressions. "//& + "If both ODA_2018_ANSWERS and ODA_ANSWER_DATE are specified, the "//& + "latter takes precedence.", default=default_answer_date) inputdir = slasher(inputdir) select case(lowercase(trim(assim_method))) @@ -408,7 +424,7 @@ subroutine set_prior_tracer(Time, G, GV, h, tv, CS) call set_PElist(CS%filter_pelist) !call MOM_mesg('Setting prior') - if (.not. CS%answers_2018) then + if (CS%answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H * 1.0e-30 ; h_neglect_edge = GV%m_to_H * 1.0e-10 @@ -676,7 +692,7 @@ subroutine apply_oda_tracer_increments(dt, Time_end, G, GV, tv, h, CS) S = S + CS%tv_bc%S endif - if (.not. CS%answers_2018) then + if (CS%answer_date >= 20190101) then h_neglect = GV%H_subroundoff ; h_neglect_edge = GV%H_subroundoff elseif (GV%Boussinesq) then h_neglect = GV%m_to_H * 1.0e-30 ; h_neglect_edge = GV%m_to_H * 1.0e-10 diff --git a/src/user/Idealized_Hurricane.F90 b/src/user/Idealized_Hurricane.F90 index d067b76eff..0d2926798f 100644 --- a/src/user/Idealized_Hurricane.F90 +++ b/src/user/Idealized_Hurricane.F90 @@ -66,9 +66,10 @@ module Idealized_hurricane !! for the Holland prorfile calculation [R L2 T-2 ~> Pa] logical :: relative_tau !< A logical to take difference between wind !! and surface currents to compute the stress - logical :: answers_2018 !< If true, use expressions driving the idealized hurricane test - !! case that recover the answers from the end of 2018. Otherwise use - !! expressions that are rescalable and respect rotational symmetry. + integer :: answer_date !< The vintage of the expressions in the idealized hurricane + !! test case. Values below 20190101 recover the answers + !! from the end of 2018, while higher values use expressions + !! that are rescalable and respect rotational symmetry. ! Parameters used if in SCM (single column model) mode logical :: SCM_mode !< If true this being used in Single Column Model mode @@ -102,7 +103,11 @@ subroutine idealized_hurricane_wind_init(Time, G, US, param_file, CS) ! Local variables real :: dP ! The pressure difference across the hurricane [R L2 T-2 ~> Pa] real :: C + integer :: default_answer_date ! The default setting for the various ANSWER_DATE flags. logical :: default_2018_answers ! The default setting for the various 2018_ANSWERS flags. + logical :: answers_2018 ! If true, use expressions driving the idealized hurricane test + ! case that recover the answers from the end of 2018. Otherwise use + ! expressions that are rescalable and respect rotational symmetry. ! This include declares and sets the variable "version". 
# include "version_variable.h" @@ -166,14 +171,27 @@ subroutine idealized_hurricane_wind_init(Time, G, US, param_file, CS) call get_param(param_file, mdl, "IDL_HURR_SCM_LOCY", CS%dy_from_center, & "Y distance of station used in the SCM idealized hurricane "//& "wind profile.", units='m', default=50.e3, scale=US%m_to_L) + call get_param(param_file, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & + "This sets the default value for the various _ANSWER_DATE parameters.", & + default=99991231) call get_param(param_file, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=.false.) - call get_param(param_file, mdl, "IDL_HURR_2018_ANSWERS", CS%answers_2018, & + default=(default_answer_date<20190101)) + call get_param(param_file, mdl, "IDL_HURR_2018_ANSWERS", answers_2018, & "If true, use expressions driving the idealized hurricane test case that recover "//& "the answers from the end of 2018. Otherwise use expressions that are rescalable "//& "and respect rotational symmetry.", default=default_2018_answers) + ! Revise inconsistent default answer dates. + if (answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 + if (.not.answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 + call get_param(param_file, mdl, "IDL_HURR_ANSWER_DATE", CS%answer_date, & + "The vintage of the expressions in the idealized hurricane test case. "//& + "Values below 20190101 recover the answers from the end of 2018, while higher "//& + "values use expressions that are rescalable and respect rotational symmetry. "//& + "If both IDL_HURR_2018_ANSWERS and IDL_HURR_ANSWER_DATE are specified, "//& + "the latter takes precedence.", default=default_answer_date) + ! The following parameters are model run-time parameters which are used ! and logged elsewhere and so should not be logged here. The default ! value should be consistent with the rest of the model. @@ -191,7 +209,7 @@ subroutine idealized_hurricane_wind_init(Time, G, US, param_file, CS) CS%rho_a = 1.2*US%kg_m3_to_R endif dP = CS%pressure_ambient - CS%pressure_central - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then C = CS%max_windspeed / sqrt( US%R_to_kg_m3 * dP ) CS%Holland_B = C**2 * US%R_to_kg_m3*CS%rho_a * exp(1.0) else @@ -261,7 +279,7 @@ subroutine idealized_hurricane_wind_forcing(sfc_state, forces, day, G, US, CS) do j=js,je do I=is-1,Ieq Uocn = sfc_state%u(I,j) * REL_TAU_FAC - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then Vocn = 0.25*(sfc_state%v(i,J)+sfc_state%v(i+1,J-1)& +sfc_state%v(i+1,J)+sfc_state%v(i,J-1))*REL_TAU_FAC else @@ -284,7 +302,7 @@ subroutine idealized_hurricane_wind_forcing(sfc_state, forces, day, G, US, CS) !> Computes tauy do J=js-1,Jeq do i=is,ie - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then Uocn = 0.25*(sfc_state%u(I,j)+sfc_state%u(I-1,j+1) + & sfc_state%u(I-1,j)+sfc_state%u(I,j+1))*REL_TAU_FAC else @@ -381,7 +399,7 @@ subroutine idealized_hurricane_wind_profile(CS, US, absf, YY, XX, UOCN, VOCN, Tx !/ ! Calculate U10 in the interior (inside of 10x radius of maximum wind), ! while adjusting U10 to 0 outside of 12x radius of maximum wind. - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then if ( (radius > 0.001*CS%rad_max_wind) .and. 
(radius < 10.*CS%rad_max_wind) ) then U10 = sqrt(CS%Holland_AxBxDP*exp(-CS%Holland_A/radiusB) / (CS%rho_a*radiusB) + & 0.25*(radius_km*absf)**2) - 0.5*radius_km*absf @@ -449,7 +467,7 @@ subroutine idealized_hurricane_wind_profile(CS, US, absf, YY, XX, UOCN, VOCN, Tx if (dU10 < 11.0*US%m_s_to_L_T) then Cd = 1.2e-3 elseif (dU10 < 20.0*US%m_s_to_L_T) then - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then Cd = (0.49 + 0.065*US%L_T_to_m_s*U10)*1.e-3 else Cd = (0.49 + 0.065*US%L_T_to_m_s*dU10)*1.e-3 @@ -514,7 +532,7 @@ subroutine SCM_idealized_hurricane_wind_forcing(sfc_state, forces, day, G, US, C transdir = pie !translation direction (-x) | !------------------------------------------------------| dP = CS%pressure_ambient - CS%pressure_central - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then C = CS%max_windspeed / sqrt( US%R_to_kg_m3*dP ) B = C**2 * US%R_to_kg_m3*CS%rho_a * exp(1.0) if (BR_Bench) then ! rho_a reset to value used in generated wind for benchmark test @@ -617,7 +635,7 @@ subroutine SCM_idealized_hurricane_wind_forcing(sfc_state, forces, day, G, US, C if (dU10 < 11.0*US%m_s_to_L_T) then Cd = 1.2e-3 elseif (dU10 < 20.0*US%m_s_to_L_T) then - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then Cd = (0.49 + 0.065 * US%L_T_to_m_s*U10 )*0.001 else Cd = (0.49 + 0.065 * US%L_T_to_m_s*dU10 )*0.001 @@ -639,7 +657,7 @@ subroutine SCM_idealized_hurricane_wind_forcing(sfc_state, forces, day, G, US, C if (dU10 < 11.0*US%m_s_to_L_T) then Cd = 1.2e-3 elseif (dU10 < 20.0*US%m_s_to_L_T) then - if (CS%answers_2018) then + if (CS%answer_date < 20190101) then Cd = (0.49 + 0.065 * US%L_T_to_m_s*U10 )*0.001 else Cd = (0.49 + 0.065 * US%L_T_to_m_s*dU10 )*0.001 From 1d054ef720a0cbe4584095a82cf3d96a3cff5141 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Sun, 31 Jul 2022 08:50:21 -0400 Subject: [PATCH 27/40] +Add do_not_log=just_read args in get_param calls Added do_not_log=just_read arguments to get_param calls where other calls in the same initialization routines already had them, so that the logging of the parameters is consistent. All answers are bitwise identical, but this could lead to changes in the MOM_parameter_doc files. 
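For reference, the pattern that these changes make consistent is sketched below. The parameter name and variable here are hypothetical; the point is the do_not_log=just_read argument, which keeps a query-only pass through the initialization routine from writing the parameter to the MOM_parameter_doc files.

    ! just_read is true when the routine is only inspecting the parameter file,
    ! so none of these parameters should be logged on that pass.
    call get_param(PF, mdl, "SOME_EXAMPLE_FLAG", example_flag, &
                   "A description that would otherwise be documented.", &
                   default=.false., do_not_log=just_read)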
--- .../MOM_state_initialization.F90 | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/initialization/MOM_state_initialization.F90 b/src/initialization/MOM_state_initialization.F90 index 3064d52035..b8e74e3c45 100644 --- a/src/initialization/MOM_state_initialization.F90 +++ b/src/initialization/MOM_state_initialization.F90 @@ -1194,21 +1194,21 @@ subroutine trim_for_ice(PF, G, GV, US, ALE_CSp, tv, h, just_read) "file SURFACE_PRESSURE_FILE into a surface pressure.", & units="file dependent", default=1., do_not_log=just_read) call get_param(PF, mdl, "MIN_THICKNESS", min_thickness, 'Minimum layer thickness', & - units='m', default=1.e-3, do_not_log=just_read, scale=US%m_to_Z) + units='m', default=1.e-3, scale=US%m_to_Z, do_not_log=just_read) call get_param(PF, mdl, "TRIMMING_USES_REMAPPING", use_remapping, & 'When trimming the column, also remap T and S.', & default=.false., do_not_log=just_read) if (use_remapping) then call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231) + default=99991231, do_not_log=just_read) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=(default_answer_date<20190101)) + default=(default_answer_date<20190101), do_not_log=just_read) call get_param(PF, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& - "forms of the same expressions.", default=default_2018_answers) + "forms of the same expressions.", default=default_2018_answers, do_not_log=just_read) ! Revise inconsistent default answer dates for remapping. if (remap_answers_2018 .and. (default_answer_date >= 20190101)) default_answer_date = 20181231 if (.not.remap_answers_2018 .and. (default_answer_date < 20190101)) default_answer_date = 20190101 @@ -1218,7 +1218,7 @@ subroutine trim_for_ice(PF, G, GV, US, ALE_CSp, tv, h, just_read) "that were in use at the end of 2018. Higher values result in the use of more "//& "robust and accurate forms of mathematically equivalent expressions. "//& "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& - "latter takes precedence.", default=default_answer_date) + "latter takes precedence.", default=default_answer_date, do_not_log=just_read) else remap_answer_date = 20181231 endif @@ -2574,20 +2574,20 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just default=.false., do_not_log=just_read) call get_param(PF, mdl, "DEFAULT_ANSWER_DATE", default_answer_date, & "This sets the default value for the various _ANSWER_DATE parameters.", & - default=99991231) + default=99991231, do_not_log=just_read) call get_param(PF, mdl, "DEFAULT_2018_ANSWERS", default_2018_answers, & "This sets the default value for the various _2018_ANSWERS parameters.", & - default=(default_answer_date<20190101)) + default=(default_answer_date<20190101), do_not_log=just_read) call get_param(PF, mdl, "TEMP_SALT_INIT_VERTICAL_REMAP_ONLY", pre_gridded, & "If true, initial conditions are on the model horizontal grid. " //& "Extrapolation over missing ocean values is done using an ICE-9 "//& "procedure with vertical ALE remapping .", & - default=.false.) 
+ default=.false., do_not_log=just_read) if (useALEremapping) then call get_param(PF, mdl, "REMAPPING_2018_ANSWERS", remap_answers_2018, & "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& - "forms of the same expressions.", default=default_2018_answers) + "forms of the same expressions.", default=default_2018_answers, do_not_log=just_read) ! Revise inconsistent default answer dates for remapping. default_remap_ans_date = default_answer_date if (remap_answers_2018 .and. (default_remap_ans_date >= 20190101)) default_remap_ans_date = 20181231 @@ -2598,12 +2598,12 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just "that were in use at the end of 2018. Higher values result in the use of more "//& "robust and accurate forms of mathematically equivalent expressions. "//& "If both REMAPPING_2018_ANSWERS and REMAPPING_ANSWER_DATE are specified, the "//& - "latter takes precedence.", default=default_remap_ans_date) + "latter takes precedence.", default=default_remap_ans_date, do_not_log=just_read) endif call get_param(PF, mdl, "HOR_REGRID_2018_ANSWERS", hor_regrid_answers_2018, & "If true, use the order of arithmetic for horizonal regridding that recovers "//& "the answers from the end of 2018. Otherwise, use rotationally symmetric "//& - "forms of the same expressions.", default=default_2018_answers) + "forms of the same expressions.", default=default_2018_answers, do_not_log=just_read) ! Revise inconsistent default answer dates for horizontal regridding. default_hor_reg_ans_date = default_answer_date if (hor_regrid_answers_2018 .and. (default_hor_reg_ans_date >= 20190101)) default_hor_reg_ans_date = 20181231 @@ -2613,7 +2613,7 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, depth_tot, G, GV, US, PF, just "Dates before 20190101 give the same answers as the code did in late 2018, "//& "while later versions add parentheses for rotational symmetry. "//& "If both HOR_REGRID_2018_ANSWERS and HOR_REGRID_ANSWER_DATE are specified, the "//& - "latter takes precedence.", default=default_hor_reg_ans_date) + "latter takes precedence.", default=default_hor_reg_ans_date, do_not_log=just_read) if (.not.useALEremapping) then call get_param(PF, mdl, "ADJUST_THICKNESS", correct_thickness, & From 7780ff4f6976b350a714ee93ef9aead0352cc92f Mon Sep 17 00:00:00 2001 From: Marshall Ward Date: Fri, 5 Aug 2022 21:52:57 -0400 Subject: [PATCH 28/40] Autoconf: Logging for dependency builds This patch adds the `REPORT_ERROR_LOGS` variable to the .testing and ac/deps makefiles. When set to true, a failed FMS build will also echo the contents of `config.log`, in order to diagnose remote CI builds. The variable is disabled by default, to reduce the amount of output at the command line. It has been added to the GitHub Actions CI, and is propagated through `.testing/Makefile`. Although not used in the MOM6 builds, it could be extended to them if it proves useful. Currently, these Makefiles always echo `config.log` after a failed build.
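As a usage note, the GitHub Actions change in this patch enables the flag on the make command line when building the FMS dependency, for example:

    REPORT_ERROR_LOGS=true make deps/lib/libFMS.a -s -j

so that `config.log` is echoed only when that dependency build fails in CI.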
--- .github/actions/testing-setup/action.yml | 2 +- .testing/Makefile | 8 +++++++- ac/deps/Makefile | 21 ++++++++++++--------- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/.github/actions/testing-setup/action.yml b/.github/actions/testing-setup/action.yml index a21ee949db..8a3264b140 100644 --- a/.github/actions/testing-setup/action.yml +++ b/.github/actions/testing-setup/action.yml @@ -32,7 +32,7 @@ runs: run: | echo "::group::Compile FMS library" cd .testing - make deps/lib/libFMS.a -s -j + REPORT_ERROR_LOGS=true make deps/lib/libFMS.a -s -j echo "::endgroup::" - name: Store compiler flags used in Makefile diff --git a/.testing/Makefile b/.testing/Makefile index 150a365692..917feb311b 100644 --- a/.testing/Makefile +++ b/.testing/Makefile @@ -116,6 +116,9 @@ DO_COVERAGE ?= # Report failure if coverage report is not uploaded REQUIRE_COVERAGE_UPLOAD ?= +# Print logs if an error is encountered +REPORT_ERROR_LOGS ?= + # Time measurement (configurable by the CI) TIME ?= time @@ -330,7 +333,10 @@ $(TARGET_CODEBASE): # FMS # Set up the FMS build environment variables -FMS_ENV = PATH="${PATH}:$(realpath ../ac)" FCFLAGS="$(FCFLAGS_FMS)" +FMS_ENV = \ + PATH="${PATH}:$(realpath ../ac)" \ + FCFLAGS="$(FCFLAGS_FMS)" \ + REPORT_ERROR_LOGS="$(REPORT_ERROR_LOGS)" deps/lib/libFMS.a: deps/fms/build/libFMS.a $(MAKE) -C deps lib/libFMS.a diff --git a/ac/deps/Makefile b/ac/deps/Makefile index af567f6a72..84d43eb26d 100644 --- a/ac/deps/Makefile +++ b/ac/deps/Makefile @@ -14,13 +14,16 @@ FMS_COMMIT ?= 2019.01.03 # List of source files to link this Makefile's dependencies to model Makefiles # Assumes a depth of two, and the following extensions: F90 inc c h # (1): Root directory -# NOTE: extensions could be a second variable SOURCE = \ $(foreach ext,F90 inc c h,$(wildcard $(1)/*/*.$(ext) $(1)/*/*/*.$(ext))) FMS_SOURCE = $(call SOURCE,fms/src) +# If `true`, print logs if an error is encountered. +REPORT_ERROR_LOGS ?= + + #--- # Rules @@ -33,13 +36,8 @@ all: lib/libFMS.a # NOTE: We emulate the automake `make install` stage by storing libFMS.a to # ${srcdir}/deps/lib and copying module files to ${srcdir}/deps/include. -# This is a flawed approach, since module files are untracked and could be -# handled more safely, but this is adequate for now. - - -# TODO: track *.mod copy? lib/libFMS.a: fms/build/libFMS.a - mkdir -p {lib,include} + mkdir -p lib include cp fms/build/libFMS.a lib/libFMS.a cp fms/build/*.mod include @@ -51,10 +49,15 @@ fms/build/libFMS.a: fms/build/Makefile fms/build/Makefile: Makefile.fms.in fms/src/configure mkdir -p fms/build cp Makefile.fms.in fms/src/Makefile.in - cd $(@D) && ../src/configure --srcdir=../src + cd $(@D) && { \ + ../src/configure --srcdir=../src \ + || { \ + if [ "${REPORT_ERROR_LOGS}" = true ]; then cat config.log ; fi ; \ + false; \ + } \ + } -# TODO: Track m4 macros? fms/src/configure: configure.fms.ac $(FMS_SOURCE) | fms/src cp configure.fms.ac fms/src/configure.ac cp -r m4 $(@D) From 00b0c9f522edca27d3642ded85fb7d618ac2c6e4 Mon Sep 17 00:00:00 2001 From: Marshall Ward Date: Fri, 5 Aug 2022 21:53:12 -0400 Subject: [PATCH 29/40] CI: Replace GitHub Actions MacOS compiler env This patch fixes the gcc compiler environment variables on their MacOS systems. GitHub recently made some GCC compiler changes on their MacOS machines; specifically, `gcc-11` and `gfortran-11` appear to no longer be available. Previously, we were unable to use the default gcc and had to use these executables. 
On this newer version, we can use the default system gcc, and the `CC` environment variable can be left unset. We still need `FC` to point to `gfortran`, since it otherwise tries to build with `f77`, which does not exist. --- .github/workflows/macos-regression.yml | 3 +-- .github/workflows/macos-stencil.yml | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/macos-regression.yml b/.github/workflows/macos-regression.yml index d975854e0c..e8f7469cca 100644 --- a/.github/workflows/macos-regression.yml +++ b/.github/workflows/macos-regression.yml @@ -8,8 +8,7 @@ jobs: runs-on: macOS-latest env: - CC: gcc-11 - FC: gfortran-11 + FC: gfortran defaults: run: diff --git a/.github/workflows/macos-stencil.yml b/.github/workflows/macos-stencil.yml index 33436c221f..e0fcfeef8e 100644 --- a/.github/workflows/macos-stencil.yml +++ b/.github/workflows/macos-stencil.yml @@ -8,8 +8,7 @@ jobs: runs-on: macOS-latest env: - CC: gcc-11 - FC: gfortran-11 + FC: gfortran defaults: run: From 5a70c413e06421c5d0c0f75c3252de9f636fdd95 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Mon, 1 Aug 2022 15:46:40 -0400 Subject: [PATCH 30/40] Fatal error if a tracer package is registered twice Made the WARNING error messages that were issued if a tracer package registration routine is called with an associated control structure into FATAL errors, as suggested by github.com/mom-ocean/MOM6/issues/1444. A tracer package could be used twice, but it would require the use of separate control structures for each instance. This change will bring down the model in a case that probably should not be running. All solutions are bitwise identical in any cases that run, and all MOM6-examples test cases work exactly as before. --- src/tracer/DOME_tracer.F90 | 11 +++++------ src/tracer/ISOMIP_tracer.F90 | 7 +++---- src/tracer/MOM_CFC_cap.F90 | 7 +++---- src/tracer/MOM_OCMIP2_CFC.F90 | 5 ++--- src/tracer/MOM_generic_tracer.F90 | 5 ++--- src/tracer/RGC_tracer.F90 | 9 ++++----- src/tracer/advection_test_tracer.F90 | 5 ++--- src/tracer/boundary_impulse_tracer.F90 | 5 ++--- src/tracer/dye_example.F90 | 5 ++--- src/tracer/dyed_obc_tracer.F90 | 11 +++++------ src/tracer/ideal_age_example.F90 | 5 ++--- src/tracer/nw2_tracers.F90 | 5 ++--- src/tracer/oil_tracer.F90 | 5 ++--- src/tracer/pseudo_salt_tracer.F90 | 6 ++---- src/tracer/tracer_example.F90 | 5 ++--- 15 files changed, 40 insertions(+), 56 deletions(-) diff --git a/src/tracer/DOME_tracer.F90 b/src/tracer/DOME_tracer.F90 index 604751f4ef..d1c6ebd7bf 100644 --- a/src/tracer/DOME_tracer.F90 +++ b/src/tracer/DOME_tracer.F90 @@ -67,10 +67,10 @@ function register_DOME_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) type(tracer_registry_type), pointer :: tr_Reg !< A pointer to the tracer registry. type(MOM_restart_CS), intent(inout) :: restart_CS !< MOM restart control struct -! Local variables + ! Local variables character(len=80) :: name, longname -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "DOME_tracer" ! This module's name. character(len=48) :: flux_units ! The units for tracer fluxes, usually ! kg(tracer) kg(water)-1 m3 s-1 or kg(tracer) s-1. 
@@ -81,9 +81,8 @@ function register_DOME_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "DOME_register_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "DOME_register_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/ISOMIP_tracer.F90 b/src/tracer/ISOMIP_tracer.F90 index d6979f6ce4..fb2a44242f 100644 --- a/src/tracer/ISOMIP_tracer.F90 +++ b/src/tracer/ISOMIP_tracer.F90 @@ -74,8 +74,8 @@ function register_ISOMIP_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) type(MOM_restart_CS), intent(inout) :: restart_CS !< MOM restart control struct character(len=80) :: name, longname -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "ISOMIP_tracer" ! This module's name. character(len=200) :: inputdir character(len=48) :: flux_units ! The units for tracer fluxes, usually @@ -86,9 +86,8 @@ function register_ISOMIP_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "ISOMIP_register_tracer called with an "// & + call MOM_error(FATAL, "ISOMIP_register_tracer called with an "// & "associated control structure.") - return endif allocate(CS) diff --git a/src/tracer/MOM_CFC_cap.F90 b/src/tracer/MOM_CFC_cap.F90 index fc7e78e150..2a5e3f8854 100644 --- a/src/tracer/MOM_CFC_cap.F90 +++ b/src/tracer/MOM_CFC_cap.F90 @@ -83,7 +83,7 @@ function register_CFC_cap(HI, GV, param_file, CS, tr_Reg, restart_CS) character(len=40) :: mdl = "MOM_CFC_cap" ! This module's name. character(len=200) :: inputdir ! The directory where NetCDF input files are. ! This include declares and sets the variable "version". -#include "version_variable.h" +# include "version_variable.h" real, dimension(:,:,:), pointer :: tr_ptr => NULL() character(len=200) :: dummy ! Dummy variable to store params that need to be logged here. 
character :: m2char @@ -93,9 +93,8 @@ function register_CFC_cap(HI, GV, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "register_CFC_cap called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "register_CFC_cap called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/MOM_OCMIP2_CFC.F90 b/src/tracer/MOM_OCMIP2_CFC.F90 index a864ec907f..bb312b5a50 100644 --- a/src/tracer/MOM_OCMIP2_CFC.F90 +++ b/src/tracer/MOM_OCMIP2_CFC.F90 @@ -113,9 +113,8 @@ function register_OCMIP2_CFC(HI, GV, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "register_OCMIP2_CFC called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "register_OCMIP2_CFC called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/MOM_generic_tracer.F90 b/src/tracer/MOM_generic_tracer.F90 index 6170aee602..3cbed68467 100644 --- a/src/tracer/MOM_generic_tracer.F90 +++ b/src/tracer/MOM_generic_tracer.F90 @@ -119,9 +119,8 @@ function register_MOM_generic_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) register_MOM_generic_tracer = .false. if (associated(CS)) then - call MOM_error(WARNING, "register_MOM_generic_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "register_MOM_generic_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/RGC_tracer.F90 b/src/tracer/RGC_tracer.F90 index 2921fdd124..6801269245 100644 --- a/src/tracer/RGC_tracer.F90 +++ b/src/tracer/RGC_tracer.F90 @@ -75,8 +75,8 @@ function register_RGC_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) type(MOM_restart_CS), intent(inout) :: restart_CS !< MOM restart control struct character(len=80) :: name, longname -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "RGC_tracer" ! This module's name. 
character(len=200) :: inputdir real, pointer :: tr_ptr(:,:,:) => NULL() @@ -85,9 +85,8 @@ function register_RGC_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "RGC_register_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "RGC_register_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/advection_test_tracer.F90 b/src/tracer/advection_test_tracer.F90 index a4e53ae797..5e43ce5757 100644 --- a/src/tracer/advection_test_tracer.F90 +++ b/src/tracer/advection_test_tracer.F90 @@ -89,9 +89,8 @@ function register_advection_test_tracer(HI, GV, param_file, CS, tr_Reg, restart_ isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "register_advection_test_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "register_advection_test_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/boundary_impulse_tracer.F90 b/src/tracer/boundary_impulse_tracer.F90 index 3f8d8e7937..a7066c1ab8 100644 --- a/src/tracer/boundary_impulse_tracer.F90 +++ b/src/tracer/boundary_impulse_tracer.F90 @@ -87,9 +87,8 @@ function register_boundary_impulse_tracer(HI, GV, US, param_file, CS, tr_Reg, re isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "register_boundary_impulse_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "register_boundary_impulse_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/dye_example.F90 b/src/tracer/dye_example.F90 index a372faa518..1aae1d3367 100644 --- a/src/tracer/dye_example.F90 +++ b/src/tracer/dye_example.F90 @@ -88,9 +88,8 @@ function register_dye_tracer(HI, GV, US, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "register_dye_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "register_dye_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/dyed_obc_tracer.F90 b/src/tracer/dyed_obc_tracer.F90 index b82bcf7fc6..285abe3785 100644 --- a/src/tracer/dyed_obc_tracer.F90 +++ b/src/tracer/dyed_obc_tracer.F90 @@ -58,10 +58,10 @@ function register_dyed_obc_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) type(tracer_registry_type), pointer :: tr_Reg !< A pointer to the tracer registry. type(MOM_restart_CS), target, intent(inout) :: restart_CS !< MOM restart control struct -! Local variables + ! Local variables character(len=80) :: name, longname -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "dyed_obc_tracer" ! This module's name. character(len=200) :: inputdir character(len=48) :: flux_units ! 
The units for tracer fluxes, usually @@ -72,9 +72,8 @@ function register_dyed_obc_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "dyed_obc_register_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "dyed_obc_register_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/ideal_age_example.F90 b/src/tracer/ideal_age_example.F90 index 66c76f0e2c..4aff3ed4bd 100644 --- a/src/tracer/ideal_age_example.F90 +++ b/src/tracer/ideal_age_example.F90 @@ -92,9 +92,8 @@ function register_ideal_age_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "register_ideal_age_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "register_ideal_age_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/nw2_tracers.F90 b/src/tracer/nw2_tracers.F90 index 36885d8dc8..e9d0bd5ef7 100644 --- a/src/tracer/nw2_tracers.F90 +++ b/src/tracer/nw2_tracers.F90 @@ -55,8 +55,8 @@ logical function register_nw2_tracers(HI, GV, US, param_file, CS, tr_Reg, restar !! diffusion module type(MOM_restart_CS), target, intent(inout) :: restart_CS !< MOM restart control struct -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "nw2_tracers" ! This module's name. character(len=8) :: var_name ! The variable's name. real, pointer :: tr_ptr(:,:,:) => NULL() @@ -69,7 +69,6 @@ logical function register_nw2_tracers(HI, GV, US, param_file, CS, tr_Reg, restar if (associated(CS)) then call MOM_error(FATAL, "register_nw2_tracer called with an "// & "associated control structure.") - return endif allocate(CS) diff --git a/src/tracer/oil_tracer.F90 b/src/tracer/oil_tracer.F90 index 9b7b630237..3fc2537caa 100644 --- a/src/tracer/oil_tracer.F90 +++ b/src/tracer/oil_tracer.F90 @@ -98,9 +98,8 @@ function register_oil_tracer(HI, GV, US, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "register_oil_tracer called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "register_oil_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/pseudo_salt_tracer.F90 b/src/tracer/pseudo_salt_tracer.F90 index 9221d76f2c..843d725839 100644 --- a/src/tracer/pseudo_salt_tracer.F90 +++ b/src/tracer/pseudo_salt_tracer.F90 @@ -79,10 +79,8 @@ function register_pseudo_salt_tracer(HI, GV, param_file, CS, tr_Reg, restart_CS) isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "register_pseudo_salt_tracer called with an "// & - "associated control structure.") - register_pseudo_salt_tracer = .false. 
- return + call MOM_error(FATAL, "register_pseudo_salt_tracer called with an "// & + "associated control structure.") endif allocate(CS) diff --git a/src/tracer/tracer_example.F90 b/src/tracer/tracer_example.F90 index 5d53c84df8..335f82a59b 100644 --- a/src/tracer/tracer_example.F90 +++ b/src/tracer/tracer_example.F90 @@ -79,9 +79,8 @@ function USER_register_tracer_example(HI, GV, param_file, CS, tr_Reg, restart_CS isd = HI%isd ; ied = HI%ied ; jsd = HI%jsd ; jed = HI%jed ; nz = GV%ke if (associated(CS)) then - call MOM_error(WARNING, "USER_register_tracer_example called with an "// & - "associated control structure.") - return + call MOM_error(FATAL, "USER_register_tracer_example called with an "// & - "associated control structure.") endif allocate(CS) From 97198d6bb6a185476583db84cfc7221768d4cc9e Mon Sep 17 00:00:00 2001 From: Marshall Ward Date: Sun, 7 Aug 2022 21:33:48 -0400 Subject: [PATCH 31/40] Point gitmodules to mom-ocean forks This patch modifies the URLs of the git modules from the main repositories to local mirrors at @mom-ocean. Since the MOM6 source code is hard-coded to particular hashes, there is no benefit to linking to the most recent versions of these codes. And linking to local forks controlled by us will protect us from potential removal of the commits in these repositories. --- .gitmodules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 637f1188ed..872100b62c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "pkg/CVMix-src"] path = pkg/CVMix-src - url = https://github.com/CVMix/CVMix-src.git + url = https://github.com/mom-ocean/CVMix-src.git [submodule "pkg/GSW-Fortran"] path = pkg/GSW-Fortran - url = https://github.com/TEOS-10/GSW-Fortran.git + url = https://github.com/mom-ocean/GSW-Fortran.git From 53fdbc03ad7da7cd8b87b264b4feb6b243190f15 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Tue, 2 Aug 2022 18:05:20 -0400 Subject: [PATCH 32/40] +Remove inappropriate stochastics output Modified output from MOM_stochastics to stdout to only occur if the stochastics code is actively being used, and to only register stochastics related diagnostics if they are actually available. Also, initialized some stochastics arrays to 0 when they are allocated. Some indenting and white space were also changed to bring the MOM_stochastics code into closer compliance with MOM6 code standards. All answers are bitwise identical, but there will be fewer inappropriate entries in the available_diags files, and some output to stdout will only occur when appropriate and be subject to verbosity control.
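Condensed from the diff below, the guarded allocation and registration now follow the pattern sketched here; the source= specifier on allocate is what zero-initializes the stochastic weight arrays, and the register_diag_field call is only made when the corresponding scheme is active.

    if (CS%do_sppt) then
      ! Allocate the SPPT weights and start them from zero.
      allocate(CS%sppt_wts(grid%isd:grid%ied,grid%jsd:grid%jed), source=0.0)
      ! Register the SPPT diagnostic only when SPPT is actually in use.
      CS%id_sppt_wts = register_diag_field('ocean_model', 'sppt_pattern', CS%diag%axesT1, Time, &
                                           'random pattern for sppt', 'None')
    endif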
--- .../stochastic/MOM_stochastics.F90 | 86 ++++++++++--------- 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/src/parameterizations/stochastic/MOM_stochastics.F90 b/src/parameterizations/stochastic/MOM_stochastics.F90 index 737ed8286e..04a29019fa 100644 --- a/src/parameterizations/stochastic/MOM_stochastics.F90 +++ b/src/parameterizations/stochastic/MOM_stochastics.F90 @@ -11,12 +11,12 @@ module MOM_stochastics use MOM_diag_mediator, only : register_diag_field, diag_ctrl, time_type use MOM_grid, only : ocean_grid_type use MOM_verticalGrid, only : verticalGrid_type -use MOM_error_handler, only : MOM_error, FATAL, WARNING, is_root_pe +use MOM_error_handler, only : MOM_error, MOM_mesg, FATAL, WARNING, is_root_pe use MOM_error_handler, only : callTree_enter, callTree_leave use MOM_file_parser, only : get_param, log_version, close_param_file, param_file_type use mpp_domains_mod, only : domain2d, mpp_get_layout, mpp_get_global_domain use mpp_domains_mod, only : mpp_define_domains, mpp_get_compute_domain, mpp_get_data_domain -use MOM_domains, only : root_PE,num_PEs +use MOM_domains, only : root_PE, num_PEs use MOM_coms, only : Get_PElist use stochastic_physics, only : init_stochastic_physics_ocn, run_stochastic_physics_ocn @@ -31,8 +31,8 @@ module MOM_stochastics logical :: do_sppt !< If true, stochastically perturb the diabatic logical :: pert_epbl !< If true, then randomly perturb the KE dissipation and genration terms integer :: id_sppt_wts = -1 !< Diagnostic id for SPPT - integer :: id_epbl1_wts=-1 !< Diagnostic id for epbl generation perturbation - integer :: id_epbl2_wts=-1 !< Diagnostic id for epbl dissipation perturbation + integer :: id_epbl1_wts = -1 !< Diagnostic id for epbl generation perturbation + integer :: id_epbl2_wts = -1 !< Diagnostic id for epbl dissipation perturbation ! stochastic patterns real, allocatable :: sppt_wts(:,:) !< Random pattern for ocean SPPT !! tendencies with a number between 0 and 2 @@ -46,15 +46,16 @@ module MOM_stochastics !! This subroutine initializes the stochastics physics control structure. subroutine stochastics_init(dt, grid, GV, CS, param_file, diag, Time) - real, intent(in) :: dt !< time step [T ~> s] - type(ocean_grid_type), intent(in) :: grid !< horizontal grid information - type(verticalGrid_type), intent(in) :: GV !< vertical grid structure - type(stochastic_CS), pointer, intent(inout):: CS !< stochastic control structure + real, intent(in) :: dt !< time step [T ~> s] + type(ocean_grid_type), intent(in) :: grid !< horizontal grid information + type(verticalGrid_type), intent(in) :: GV !< vertical grid structure + type(stochastic_CS), pointer, intent(inout) :: CS !< stochastic control structure type(param_file_type), intent(in) :: param_file !< A structure to parse for run-time parameters - type(diag_ctrl), target, intent(inout) :: diag !< structure to regulate diagnostic output - type(time_type), target :: Time !< model time + type(diag_ctrl), target, intent(inout) :: diag !< structure to regulate diagnostic output + type(time_type), target :: Time !< model time + ! Local variables - integer,allocatable :: pelist(:) ! list of pes for this instance of the ocean + integer, allocatable :: pelist(:) ! list of pes for this instance of the ocean integer :: mom_comm ! list of pes for this instance of the ocean integer :: num_procs ! number of processors to pass to stochastic physics integer :: iret ! 
return code from stochastic physics @@ -62,8 +63,8 @@ subroutine stochastics_init(dt, grid, GV, CS, param_file, diag, Time) integer :: nx ! number of x-points including halo integer :: ny ! number of x-points including halo -! This include declares and sets the variable "version". -#include "version_variable.h" + ! This include declares and sets the variable "version". +# include "version_variable.h" character(len=40) :: mdl = "ocean_stochastics_init" ! This module's name. call callTree_enter("ocean_model_stochastic_init(), MOM_stochastics.F90") @@ -79,7 +80,7 @@ subroutine stochastics_init(dt, grid, GV, CS, param_file, diag, Time) ! Read all relevant parameters and write them to the model log. call log_version(param_file, mdl, version, "") -! get number of processors and PE list for stocasthci physics initialization + ! get number of processors and PE list for stochastic physics initialization call get_param(param_file, mdl, "DO_SPPT", CS%do_sppt, & "If true, then stochastically perturb the thermodynamic "//& "tendemcies of T,S, amd h. Amplitude and correlations are "//& @@ -91,37 +92,40 @@ subroutine stochastics_init(dt, grid, GV, CS, param_file, diag, Time) "controlled by the nam_stoch namelist in the UFS model only.", & default=.false.) if (CS%do_sppt .OR. CS%pert_epbl) then - num_procs=num_PEs() - allocate(pelist(num_procs)) - call Get_PElist(pelist,commID = mom_comm) - pe_zero=root_PE() - nx = grid%ied - grid%isd + 1 - ny = grid%jed - grid%jsd + 1 - call init_stochastic_physics_ocn(dt,grid%geoLonT,grid%geoLatT,nx,ny,GV%ke, & - CS%pert_epbl,CS%do_sppt,pe_zero,mom_comm,iret) - if (iret/=0) then - call MOM_error(FATAL, "call to init_stochastic_physics_ocn failed") - return - endif - - if (CS%do_sppt) allocate(CS%sppt_wts(grid%isd:grid%ied,grid%jsd:grid%jed)) - if (CS%pert_epbl) then - allocate(CS%epbl1_wts(grid%isd:grid%ied,grid%jsd:grid%jed)) - allocate(CS%epbl2_wts(grid%isd:grid%ied,grid%jsd:grid%jed)) - endif + num_procs = num_PEs() + allocate(pelist(num_procs)) + call Get_PElist(pelist,commID = mom_comm) + pe_zero = root_PE() + nx = grid%ied - grid%isd + 1 + ny = grid%jed - grid%jsd + 1 + call init_stochastic_physics_ocn(dt,grid%geoLonT,grid%geoLatT,nx,ny,GV%ke, & + CS%pert_epbl,CS%do_sppt,pe_zero,mom_comm,iret) + if (iret/=0) then + call MOM_error(FATAL, "call to init_stochastic_physics_ocn failed") + endif + + if (CS%do_sppt) allocate(CS%sppt_wts(grid%isd:grid%ied,grid%jsd:grid%jed), source=0.0) + if (CS%pert_epbl) then + allocate(CS%epbl1_wts(grid%isd:grid%ied,grid%jsd:grid%jed), source=0.0) + allocate(CS%epbl2_wts(grid%isd:grid%ied,grid%jsd:grid%jed), source=0.0) + endif + endif + if (CS%do_sppt) then + CS%id_sppt_wts = register_diag_field('ocean_model', 'sppt_pattern', CS%diag%axesT1, Time, & + 'random pattern for sppt', 'None') + endif + if (CS%pert_epbl) then + CS%id_epbl1_wts = register_diag_field('ocean_model', 'epbl1_wts', CS%diag%axesT1, Time, & + 'random pattern for KE generation', 'None') + CS%id_epbl2_wts = register_diag_field('ocean_model', 'epbl2_wts', CS%diag%axesT1, Time, & + 'random pattern for KE dissipation', 'None') endif - CS%id_sppt_wts = register_diag_field('ocean_model', 'sppt_pattern', CS%diag%axesT1, Time, & - 'random pattern for sppt', 'None') - CS%id_epbl1_wts = register_diag_field('ocean_model', 'epbl1_wts', CS%diag%axesT1, Time, & - 'random pattern for KE generation', 'None') - CS%id_epbl2_wts = register_diag_field('ocean_model', 'epbl2_wts', CS%diag%axesT1, Time, & - 'random pattern for KE dissipation', 'None') - if (is_root_pe()) & - 
write(*,'(/12x,a/)') '=== COMPLETED MOM STOCHASTIC INITIALIZATION =====' + if (CS%do_sppt .OR. CS%pert_epbl) & + call MOM_mesg(' === COMPLETED MOM STOCHASTIC INITIALIZATION =====') call callTree_leave("ocean_model_init(") - return + end subroutine stochastics_init !> update_ocean_model uses the forcing in Ice_ocean_boundary to advance the From a3697c6a7c80b8d8a62b7baa6c04a6f070e9e190 Mon Sep 17 00:00:00 2001 From: Gustavo Marques Date: Sun, 20 Dec 2020 18:10:38 -0700 Subject: [PATCH 33/40] Add option to read EKE via file for MEKE Eddy kinetic energy can now be read in from a time-varying field in a file. The interpolated EKE is used in place of MEKE's PDE based approximation. All the other options that are used to estimate viscosity, GM coefficient, and tracer eddy diffusivity are still valid. Note: this feature has not been extensively tested. --- src/core/MOM.F90 | 2 +- src/parameterizations/lateral/MOM_MEKE.F90 | 355 ++++++++++++--------- 2 files changed, 201 insertions(+), 156 deletions(-) diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index 8fc6600b69..4cb438c2c3 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -1215,7 +1215,7 @@ subroutine step_MOM_dynamics(forces, p_surf_begin, p_surf_end, dt, dt_thermo, & call diag_update_remap_grids(CS%diag) if (CS%useMEKE) call step_forward_MEKE(CS%MEKE, h, CS%VarMix%SN_u, CS%VarMix%SN_v, & - CS%visc, dt, G, GV, US, CS%MEKE_CSp, CS%uhtr, CS%vhtr) + CS%visc, dt, G, GV, US, CS%MEKE_CSp, CS%uhtr, CS%vhtr, Time_local) call disable_averaging(CS%diag) ! Advance the dynamics time by dt. diff --git a/src/parameterizations/lateral/MOM_MEKE.F90 b/src/parameterizations/lateral/MOM_MEKE.F90 index 2661251766..3aa700d05c 100644 --- a/src/parameterizations/lateral/MOM_MEKE.F90 +++ b/src/parameterizations/lateral/MOM_MEKE.F90 @@ -14,12 +14,19 @@ module MOM_MEKE use MOM_file_parser, only : read_param, get_param, log_version, param_file_type use MOM_grid, only : ocean_grid_type use MOM_hor_index, only : hor_index_type +<<<<<<< HEAD +======= +use MOM_io, only : vardesc, var_desc, slasher +>>>>>>> 0986bc3ca (Add option to read EKE via file) use MOM_restart, only : MOM_restart_CS, register_restart_field, query_initialized use MOM_unit_scaling, only : unit_scale_type use MOM_variables, only : vertvisc_type use MOM_verticalGrid, only : verticalGrid_type use MOM_MEKE_types, only : MEKE_type +use time_interp_external_mod, only : init_external_field, time_interp_external +use time_interp_external_mod, only : time_interp_external_init + implicit none ; private #include @@ -43,6 +50,7 @@ module MOM_MEKE logical :: visc_drag !< If true use the vertvisc_type to calculate bottom drag. logical :: MEKE_GEOMETRIC !< If true, uses the GM coefficient formulation from the GEOMETRIC !! framework (Marshall et al., 2012) + logical :: MEKE_from_file !< If true, reads EKE from a netCDF file real :: MEKE_GEOMETRIC_alpha !< The nondimensional coefficient governing the efficiency of the !! GEOMETRIC thickness diffusion. logical :: MEKE_equilibrium_alt !< If true, use an alternative calculation for the @@ -90,7 +98,9 @@ module MOM_MEKE logical :: kh_flux_enabled !< If true, lateral diffusive MEKE flux is enabled. logical :: initialize !< If True, invokes a steady state solver to calculate MEKE. 
logical :: debug !< If true, write out checksums of data for debugging - + character(len=200) :: inputdir !< directory where NetCDF input files are + character(len=200) :: eke_file !< filename for eke data + character(len=30) :: eke_var_name !< name of variable in ncfile type(diag_ctrl), pointer :: diag => NULL() !< A type that regulates diagnostics output !>@{ Diagnostic handles integer :: id_MEKE = -1, id_Ue = -1, id_Kh = -1, id_src = -1 @@ -101,7 +111,7 @@ module MOM_MEKE integer :: id_Lrhines = -1, id_Leady = -1 integer :: id_MEKE_equilibrium = -1 !>@} - + integer :: id_eke = -1 ! Infrastructure integer :: id_clock_pass !< Clock for group pass calls type(group_pass_type) :: pass_MEKE !< Group halo pass handle for MEKE%MEKE and maybe MEKE%Kh_diff @@ -112,8 +122,8 @@ module MOM_MEKE !> Integrates forward-in-time the MEKE eddy energy equation. !! See \ref section_MEKE_equations. -subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, hv) - type(MEKE_type), intent(inout) :: MEKE !< MEKE fields +subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, hv, Time) + type(MEKE_type), pointer :: MEKE !< MEKE data. type(ocean_grid_type), intent(inout) :: G !< Ocean grid. type(verticalGrid_type), intent(in) :: GV !< Ocean vertical grid structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type @@ -122,12 +132,14 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h real, dimension(SZI_(G),SZJB_(G)), intent(in) :: SN_v !< Eady growth rate at v-points [T-1 ~> s-1]. type(vertvisc_type), intent(in) :: visc !< The vertical viscosity type. real, intent(in) :: dt !< Model(baroclinic) time-step [T ~> s]. - type(MEKE_CS), intent(inout) :: CS !< MEKE control structure. - real, dimension(SZIB_(G),SZJ_(G),SZK_(GV)), intent(in) :: hu !< Accumlated zonal mass flux [H L2 ~> m3 or kg] - real, dimension(SZI_(G),SZJB_(G),SZK_(GV)), intent(in) :: hv !< Accumlated meridional mass flux [H L2 ~> m3 or kg] + type(MEKE_CS), pointer :: CS !< MEKE control structure. + real, dimension(SZIB_(G),SZJ_(G),SZK_(G)), intent(in) :: hu !< Accumlated zonal mass flux [H L2 ~> m3 or kg]. + real, dimension(SZI_(G),SZJB_(G),SZK_(G)), intent(in) :: hv !< Accumlated meridional mass flux [H L2 ~> m3 or kg] + type(time_type), intent(in) :: Time !< The time used for interpolating EKE ! Local variables real, dimension(SZI_(G),SZJ_(G)) :: & + data_eke, & ! EKE from file mass, & ! The total mass of the water column [R Z ~> kg m-2]. I_mass, & ! The inverse of mass [R-1 Z-1 ~> m2 kg-1]. depth_tot, & ! The depth of the water column [Z ~> m]. @@ -191,6 +203,8 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h return endif + if (.not. CS%MEKE_from_file) then + if (CS%debug) then if (allocated(MEKE%mom_src)) & call hchksum(MEKE%mom_src, 'MEKE mom_src', G%HI, scale=US%RZ3_T3_to_W_m2*US%L_to_Z**2) @@ -569,102 +583,109 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h call hchksum(MEKE%MEKE, "MEKE post-update MEKE", G%HI, haloshift=0, scale=US%L_T_to_m_s**2) endif - call cpu_clock_begin(CS%id_clock_pass) - call do_group_pass(CS%pass_MEKE, G%Domain) - call cpu_clock_end(CS%id_clock_pass) + else ! read MEKE from file + call time_interp_external(CS%id_eke,Time,data_eke) + do j=js,je ; do i=is,ie + MEKE%MEKE(i,j) = data_eke(i,j) * G%mask2dT(i,j) + enddo; enddo + endif - ! Calculate diffusivity for main model to use - if (CS%MEKE_KhCoeff>0.) 
then - if (.not.CS%MEKE_GEOMETRIC) then - if (CS%use_old_lscale) then - if (CS%Rd_as_max_scale) then - !$OMP parallel do default(shared) - do j=js,je ; do i=is,ie - MEKE%Kh(i,j) = (CS%MEKE_KhCoeff * & - sqrt(2.*max(0.,barotrFac2(i,j)*MEKE%MEKE(i,j))*G%areaT(i,j)) ) * & - min(MEKE%Rd_dx_h(i,j), 1.0) - enddo ; enddo - else - !$OMP parallel do default(shared) - do j=js,je ; do i=is,ie - MEKE%Kh(i,j) = CS%MEKE_KhCoeff * & - sqrt(2.*max(0., barotrFac2(i,j)*MEKE%MEKE(i,j))*G%areaT(i,j)) - enddo ; enddo - endif + call cpu_clock_begin(CS%id_clock_pass) + call do_group_pass(CS%pass_MEKE, G%Domain) + call cpu_clock_end(CS%id_clock_pass) + + ! Calculate diffusivity for main model to use + if (CS%MEKE_KhCoeff>0.) then + if (.not.CS%MEKE_GEOMETRIC) then + if (CS%use_old_lscale) then + if (CS%Rd_as_max_scale) then + !$OMP parallel do default(shared) + do j=js,je ; do i=is,ie + MEKE%Kh(i,j) = (CS%MEKE_KhCoeff * & + sqrt(2.*max(0.,barotrFac2(i,j)*MEKE%MEKE(i,j))*G%areaT(i,j)) ) * & + min(MEKE%Rd_dx_h(i,j), 1.0) + enddo ; enddo else !$OMP parallel do default(shared) do j=js,je ; do i=is,ie MEKE%Kh(i,j) = CS%MEKE_KhCoeff * & - sqrt(2.*max(0., barotrFac2(i,j)*MEKE%MEKE(i,j))) * LmixScale(i,j) + sqrt(2.*max(0., barotrFac2(i,j)*MEKE%MEKE(i,j))*G%areaT(i,j)) enddo ; enddo endif + else + !$OMP parallel do default(shared) + do j=js,je ; do i=is,ie + MEKE%Kh(i,j) = CS%MEKE_KhCoeff * & + sqrt(2.*max(0., barotrFac2(i,j)*MEKE%MEKE(i,j))) * LmixScale(i,j) + enddo ; enddo endif endif + endif - ! Calculate viscosity for the main model to use - if (CS%viscosity_coeff_Ku /=0.) then - do j=js,je ; do i=is,ie - MEKE%Ku(i,j) = CS%viscosity_coeff_Ku * sqrt(2.*max(0.,MEKE%MEKE(i,j))) * LmixScale(i,j) - enddo ; enddo - endif + ! Calculate viscosity for the main model to use + if (CS%viscosity_coeff_Ku /=0.) then + do j=js,je ; do i=is,ie + MEKE%Ku(i,j) = CS%viscosity_coeff_Ku * sqrt(2.*max(0.,MEKE%MEKE(i,j))) * LmixScale(i,j) + enddo ; enddo + endif - if (CS%viscosity_coeff_Au /=0.) then - do j=js,je ; do i=is,ie - MEKE%Au(i,j) = CS%viscosity_coeff_Au * sqrt(2.*max(0.,MEKE%MEKE(i,j))) * LmixScale(i,j)**3 - enddo ; enddo - endif + if (CS%viscosity_coeff_Au /=0.) then + do j=js,je ; do i=is,ie + MEKE%Au(i,j) = CS%viscosity_coeff_Au * sqrt(2.*max(0.,MEKE%MEKE(i,j))) * LmixScale(i,j)**3 + enddo ; enddo + endif - if (allocated(MEKE%Kh) .or. allocated(MEKE%Ku) .or. allocated(MEKE%Au)) then - call cpu_clock_begin(CS%id_clock_pass) - call do_group_pass(CS%pass_Kh, G%Domain) - call cpu_clock_end(CS%id_clock_pass) - endif + if (associated(MEKE%Kh) .or. associated(MEKE%Ku) .or. associated(MEKE%Au)) then + call cpu_clock_begin(CS%id_clock_pass) + call do_group_pass(CS%pass_Kh, G%Domain) + call cpu_clock_end(CS%id_clock_pass) + endif - ! Offer fields for averaging. - if (any([CS%id_Ue, CS%id_Ub, CS%id_Ut] > 0)) & - tmp(:,:) = 0. - if (CS%id_MEKE>0) call post_data(CS%id_MEKE, MEKE%MEKE, CS%diag) - if (CS%id_Ue>0) then - do j=js,je ; do i=is,ie - tmp(i,j) = sqrt(max(0., 2. * MEKE%MEKE(i,j))) - enddo ; enddo - call post_data(CS%id_Ue, tmp, CS%diag) - endif - if (CS%id_Ub>0) then - do j=js,je ; do i=is,ie - tmp(i,j) = sqrt(max(0., 2. * MEKE%MEKE(i,j) * bottomFac2(i,j))) - enddo ; enddo - call post_data(CS%id_Ub, tmp, CS%diag) - endif - if (CS%id_Ut>0) then - do j=js,je ; do i=is,ie - tmp(i,j) = sqrt(max(0., 2. 
* MEKE%MEKE(i,j) * barotrFac2(i,j))) - enddo ; enddo - call post_data(CS%id_Ut, tmp, CS%diag) - endif - if (CS%id_Kh>0) call post_data(CS%id_Kh, MEKE%Kh, CS%diag) - if (CS%id_Ku>0) call post_data(CS%id_Ku, MEKE%Ku, CS%diag) - if (CS%id_Au>0) call post_data(CS%id_Au, MEKE%Au, CS%diag) - if (CS%id_KhMEKE_u>0) call post_data(CS%id_KhMEKE_u, Kh_u, CS%diag) - if (CS%id_KhMEKE_v>0) call post_data(CS%id_KhMEKE_v, Kh_v, CS%diag) - if (CS%id_src>0) call post_data(CS%id_src, src, CS%diag) - if (CS%id_decay>0) call post_data(CS%id_decay, MEKE_decay, CS%diag) - if (CS%id_GM_src>0) call post_data(CS%id_GM_src, MEKE%GM_src, CS%diag) - if (CS%id_mom_src>0) call post_data(CS%id_mom_src, MEKE%mom_src, CS%diag) - if (CS%id_GME_snk>0) call post_data(CS%id_GME_snk, MEKE%GME_snk, CS%diag) - if (CS%id_Le>0) call post_data(CS%id_Le, LmixScale, CS%diag) - if (CS%id_gamma_b>0) then - do j=js,je ; do i=is,ie - bottomFac2(i,j) = sqrt(bottomFac2(i,j)) - enddo ; enddo - call post_data(CS%id_gamma_b, bottomFac2, CS%diag) - endif - if (CS%id_gamma_t>0) then - do j=js,je ; do i=is,ie - barotrFac2(i,j) = sqrt(barotrFac2(i,j)) - enddo ; enddo - call post_data(CS%id_gamma_t, barotrFac2, CS%diag) - endif + ! Offer fields for averaging. + if (any([CS%id_Ue, CS%id_Ub, CS%id_Ut] > 0)) & + tmp(:,:) = 0. + if (CS%id_MEKE>0) call post_data(CS%id_MEKE, MEKE%MEKE, CS%diag) + if (CS%id_Ue>0) then + do j=js,je ; do i=is,ie + tmp(i,j) = sqrt(max(0., 2. * MEKE%MEKE(i,j))) + enddo ; enddo + call post_data(CS%id_Ue, tmp, CS%diag) + endif + if (CS%id_Ub>0) then + do j=js,je ; do i=is,ie + tmp(i,j) = sqrt(max(0., 2. * MEKE%MEKE(i,j) * bottomFac2(i,j))) + enddo ; enddo + call post_data(CS%id_Ub, tmp, CS%diag) + endif + if (CS%id_Ut>0) then + do j=js,je ; do i=is,ie + tmp(i,j) = sqrt(max(0., 2. * MEKE%MEKE(i,j) * barotrFac2(i,j))) + enddo ; enddo + call post_data(CS%id_Ut, tmp, CS%diag) + endif + if (CS%id_Kh>0) call post_data(CS%id_Kh, MEKE%Kh, CS%diag) + if (CS%id_Ku>0) call post_data(CS%id_Ku, MEKE%Ku, CS%diag) + if (CS%id_Au>0) call post_data(CS%id_Au, MEKE%Au, CS%diag) + if (CS%id_KhMEKE_u>0) call post_data(CS%id_KhMEKE_u, Kh_u, CS%diag) + if (CS%id_KhMEKE_v>0) call post_data(CS%id_KhMEKE_v, Kh_v, CS%diag) + if (CS%id_src>0) call post_data(CS%id_src, src, CS%diag) + if (CS%id_decay>0) call post_data(CS%id_decay, MEKE_decay, CS%diag) + if (CS%id_GM_src>0) call post_data(CS%id_GM_src, MEKE%GM_src, CS%diag) + if (CS%id_mom_src>0) call post_data(CS%id_mom_src, MEKE%mom_src, CS%diag) + if (CS%id_GME_snk>0) call post_data(CS%id_GME_snk, MEKE%GME_snk, CS%diag) + if (CS%id_Le>0) call post_data(CS%id_Le, LmixScale, CS%diag) + if (CS%id_gamma_b>0) then + do j=js,je ; do i=is,ie + bottomFac2(i,j) = sqrt(bottomFac2(i,j)) + enddo ; enddo + call post_data(CS%id_gamma_b, bottomFac2, CS%diag) + endif + if (CS%id_gamma_t>0) then + do j=js,je ; do i=is,ie + barotrFac2(i,j) = sqrt(barotrFac2(i,j)) + enddo ; enddo + call post_data(CS%id_gamma_t, barotrFac2, CS%diag) + endif end subroutine step_forward_MEKE @@ -1033,6 +1054,7 @@ logical function MEKE_init(Time, G, US, param_file, diag, CS, MEKE, restart_CS) ! run to the representation in a restart file. real :: MEKE_restoring_timescale ! The timescale used to nudge MEKE toward its equilibrium value. real :: cdrag ! The default bottom drag coefficient [nondim]. + character(len=200) :: eke_file integer :: i, j, is, ie, js, je, isd, ied, jsd, jed logical :: laplacian, biharmonic, coldStart ! This include declares and sets the variable "version". 
@@ -1054,72 +1076,95 @@ logical function MEKE_init(Time, G, US, param_file, diag, CS, MEKE, restart_CS) call MOM_mesg("MEKE_init: reading parameters ", 5) - ! Read all relevant parameters and write them to the model log. - call get_param(param_file, mdl, "MEKE_DAMPING", CS%MEKE_damping, & - "The local depth-independent MEKE dissipation rate.", & - units="s-1", default=0.0, scale=US%T_to_s) - call get_param(param_file, mdl, "MEKE_CD_SCALE", CS%MEKE_Cd_scale, & - "The ratio of the bottom eddy velocity to the column mean "//& - "eddy velocity, i.e. sqrt(2*MEKE). This should be less than 1 "//& - "to account for the surface intensification of MEKE.", & - units="nondim", default=0.) - call get_param(param_file, mdl, "MEKE_CB", CS%MEKE_Cb, & - "A coefficient in the expression for the ratio of bottom projected "//& - "eddy energy and mean column energy (see Jansen et al. 2015).",& - units="nondim", default=25.) - call get_param(param_file, mdl, "MEKE_MIN_GAMMA2", CS%MEKE_min_gamma, & - "The minimum allowed value of gamma_b^2.",& - units="nondim", default=0.0001) - call get_param(param_file, mdl, "MEKE_CT", CS%MEKE_Ct, & - "A coefficient in the expression for the ratio of barotropic "//& - "eddy energy and mean column energy (see Jansen et al. 2015).",& - units="nondim", default=50.) - call get_param(param_file, mdl, "MEKE_GMCOEFF", CS%MEKE_GMcoeff, & - "The efficiency of the conversion of potential energy "//& - "into MEKE by the thickness mixing parameterization. "//& - "If MEKE_GMCOEFF is negative, this conversion is not "//& - "used or calculated.", units="nondim", default=-1.0) - call get_param(param_file, mdl, "MEKE_GEOMETRIC", CS%MEKE_GEOMETRIC, & - "If MEKE_GEOMETRIC is true, uses the GM coefficient formulation "//& - "from the GEOMETRIC framework (Marshall et al., 2012).", default=.false.) - call get_param(param_file, mdl, "MEKE_GEOMETRIC_ALPHA", CS%MEKE_GEOMETRIC_alpha, & - "The nondimensional coefficient governing the efficiency of the GEOMETRIC \n"//& - "thickness diffusion.", units="nondim", default=0.05) - call get_param(param_file, mdl, "MEKE_EQUILIBRIUM_ALT", CS%MEKE_equilibrium_alt, & - "If true, use an alternative formula for computing the (equilibrium)"//& - "initial value of MEKE.", default=.false.) - call get_param(param_file, mdl, "MEKE_EQUILIBRIUM_RESTORING", CS%MEKE_equilibrium_restoring, & - "If true, restore MEKE back to its equilibrium value, which is calculated at "//& - "each time step.", default=.false.) - if (CS%MEKE_equilibrium_restoring) then - call get_param(param_file, mdl, "MEKE_RESTORING_TIMESCALE", MEKE_restoring_timescale, & - "The timescale used to nudge MEKE toward its equilibrium value.", units="s", & - default=1e6, scale=US%s_to_T) - CS%MEKE_restoring_rate = 1.0 / MEKE_restoring_timescale - endif + call get_param(param_file, mdl, "MEKE_FROM_FILE", CS%MEKE_from_file, & + "If true, reads EKE from a netCDF file.", default=.false.) + if (CS%MEKE_from_file) then + call time_interp_external_init + call get_param(param_file, mdl, "EKE_FILE", CS%eke_file, & + "A file in which to find the surface salinity to use for restoring.", & + default="eke_file.nc") + call get_param(param_file, mdl, "EKE_VARIABLE", CS%eke_var_name, & + "The name of the surface salinity variable to read from "//& + "SALT_RESTORE_FILE for restoring salinity.", & + default="eke") + call get_param(param_file, mdl, "INPUTDIR", CS%inputdir, & + "The directory in which all input files are found.", & + default=".", do_not_log=.true.) 
+ CS%inputdir = slasher(CS%inputdir) + + eke_file = trim(CS%inputdir) // trim(CS%eke_file) + CS%id_eke = init_external_field(eke_file, CS%eke_var_name, domain=G%Domain%mpp_domain) + + else + ! Read all relevant parameters and write them to the model log. + call get_param(param_file, mdl, "MEKE_DAMPING", CS%MEKE_damping, & + "The local depth-independent MEKE dissipation rate.", & + units="s-1", default=0.0, scale=US%T_to_s) + call get_param(param_file, mdl, "MEKE_CD_SCALE", CS%MEKE_Cd_scale, & + "The ratio of the bottom eddy velocity to the column mean "//& + "eddy velocity, i.e. sqrt(2*MEKE). This should be less than 1 "//& + "to account for the surface intensification of MEKE.", & + units="nondim", default=0.) + call get_param(param_file, mdl, "MEKE_CB", CS%MEKE_Cb, & + "A coefficient in the expression for the ratio of bottom projected "//& + "eddy energy and mean column energy (see Jansen et al. 2015).",& + units="nondim", default=25.) + call get_param(param_file, mdl, "MEKE_MIN_GAMMA2", CS%MEKE_min_gamma, & + "The minimum allowed value of gamma_b^2.",& + units="nondim", default=0.0001) + call get_param(param_file, mdl, "MEKE_CT", CS%MEKE_Ct, & + "A coefficient in the expression for the ratio of barotropic "//& + "eddy energy and mean column energy (see Jansen et al. 2015).",& + units="nondim", default=50.) + call get_param(param_file, mdl, "MEKE_GMCOEFF", CS%MEKE_GMcoeff, & + "The efficiency of the conversion of potential energy "//& + "into MEKE by the thickness mixing parameterization. "//& + "If MEKE_GMCOEFF is negative, this conversion is not "//& + "used or calculated.", units="nondim", default=-1.0) + call get_param(param_file, mdl, "MEKE_GEOMETRIC", CS%MEKE_GEOMETRIC, & + "If MEKE_GEOMETRIC is true, uses the GM coefficient formulation "//& + "from the GEOMETRIC framework (Marshall et al., 2012).", default=.false.) + call get_param(param_file, mdl, "MEKE_GEOMETRIC_ALPHA", CS%MEKE_GEOMETRIC_alpha, & + "The nondimensional coefficient governing the efficiency of the GEOMETRIC \n"//& + "thickness diffusion.", units="nondim", default=0.05) + call get_param(param_file, mdl, "MEKE_EQUILIBRIUM_ALT", CS%MEKE_equilibrium_alt, & + "If true, use an alternative formula for computing the (equilibrium)"//& + "initial value of MEKE.", default=.false.) + call get_param(param_file, mdl, "MEKE_EQUILIBRIUM_RESTORING", CS%MEKE_equilibrium_restoring, & + "If true, restore MEKE back to its equilibrium value, which is calculated at "//& + "each time step.", default=.false.) + if (CS%MEKE_equilibrium_restoring) then + call get_param(param_file, mdl, "MEKE_RESTORING_TIMESCALE", MEKE_restoring_timescale, & + "The timescale used to nudge MEKE toward its equilibrium value.", units="s", & + default=1e6, scale=US%T_to_s) + CS%MEKE_restoring_rate = 1.0 / MEKE_restoring_timescale + endif + + call get_param(param_file, mdl, "MEKE_FRCOEFF", CS%MEKE_FrCoeff, & + "The efficiency of the conversion of mean energy into "//& + "MEKE. If MEKE_FRCOEFF is negative, this conversion "//& + "is not used or calculated.", units="nondim", default=-1.0) + call get_param(param_file, mdl, "MEKE_GMECOEFF", CS%MEKE_GMECoeff, & + "The efficiency of the conversion of MEKE into mean energy "//& + "by GME. 
If MEKE_GMECOEFF is negative, this conversion "//& + "is not used or calculated.", units="nondim", default=-1.0) + call get_param(param_file, mdl, "MEKE_BGSRC", CS%MEKE_BGsrc, & + "A background energy source for MEKE.", units="W kg-1", & + default=0.0, scale=US%m_to_L**2*US%T_to_s**3) + call get_param(param_file, mdl, "MEKE_KH", CS%MEKE_Kh, & + "A background lateral diffusivity of MEKE. "//& + "Use a negative value to not apply lateral diffusion to MEKE.", & + units="m2 s-1", default=-1.0, scale=US%m_to_L**2*US%T_to_s) + call get_param(param_file, mdl, "MEKE_K4", CS%MEKE_K4, & + "A lateral bi-harmonic diffusivity of MEKE. "//& + "Use a negative value to not apply bi-harmonic diffusion to MEKE.", & + units="m4 s-1", default=-1.0, scale=US%m_to_L**4*US%T_to_s) + call get_param(param_file, mdl, "MEKE_DTSCALE", CS%MEKE_dtScale, & + "A scaling factor to accelerate the time evolution of MEKE.", & + units="nondim", default=1.0) + endif ! MEKE_from_file + ! GMM, make sure all params used to calculated MEKE are within the above if - call get_param(param_file, mdl, "MEKE_FRCOEFF", CS%MEKE_FrCoeff, & - "The efficiency of the conversion of mean energy into "//& - "MEKE. If MEKE_FRCOEFF is negative, this conversion "//& - "is not used or calculated.", units="nondim", default=-1.0) - call get_param(param_file, mdl, "MEKE_GMECOEFF", CS%MEKE_GMECoeff, & - "The efficiency of the conversion of MEKE into mean energy "//& - "by GME. If MEKE_GMECOEFF is negative, this conversion "//& - "is not used or calculated.", units="nondim", default=-1.0) - call get_param(param_file, mdl, "MEKE_BGSRC", CS%MEKE_BGsrc, & - "A background energy source for MEKE.", units="W kg-1", & - default=0.0, scale=US%m_to_L**2*US%T_to_s**3) - call get_param(param_file, mdl, "MEKE_KH", CS%MEKE_Kh, & - "A background lateral diffusivity of MEKE. "//& - "Use a negative value to not apply lateral diffusion to MEKE.", & - units="m2 s-1", default=-1.0, scale=US%m_to_L**2*US%T_to_s) - call get_param(param_file, mdl, "MEKE_K4", CS%MEKE_K4, & - "A lateral bi-harmonic diffusivity of MEKE. "//& - "Use a negative value to not apply bi-harmonic diffusion to MEKE.", & - units="m4 s-1", default=-1.0, scale=US%m_to_L**4*US%T_to_s) - call get_param(param_file, mdl, "MEKE_DTSCALE", CS%MEKE_dtScale, & - "A scaling factor to accelerate the time evolution of MEKE.", & - units="nondim", default=1.0) call get_param(param_file, mdl, "MEKE_KHCOEFF", CS%MEKE_KhCoeff, & "A scaling factor in the expression for eddy diffusivity "//& "which is otherwise proportional to the MEKE velocity- "//& From 3872fc9186f8ac77b9542af69250eb86e8ba821d Mon Sep 17 00:00:00 2001 From: Andrew Shao Date: Wed, 6 Jul 2022 17:28:15 -0500 Subject: [PATCH 34/40] Reimplement inference of EKE via SmartRedis Significant changes to MOM_MEKE.F90 made it difficult to direcly merge the initial implementation of SmartRedis with MOM6. 
A refactor of the implementation was also done in consultation with
@adcroft to isolate the SmartRedis-specific parts of the code, which
includes creating two stub modules:
- the SmartRedis client module with associated methods
- a MEKE-related module for inferring EKE using a neural network via
  SmartRedis
The substantive code is now hosted at
https://github.com/CrayLabs/MOM6-smartredis for users interested in
replicating the work.
---
 .../smartredis/MOM_smartredis_MEKE.F90        |  61 ++
 .../external/smartredis/smartredis_client.F90 | 925 ++++++++++++++++++
 src/core/MOM.F90                              |  39 +-
 src/framework/MOM_smartredis.F90              |  78 ++
 src/parameterizations/lateral/MOM_MEKE.F90    | 141 ++-
 5 files changed, 1180 insertions(+), 64 deletions(-)
 create mode 100644 config_src/external/smartredis/MOM_smartredis_MEKE.F90
 create mode 100644 config_src/external/smartredis/smartredis_client.F90
 create mode 100644 src/framework/MOM_smartredis.F90

diff --git a/config_src/external/smartredis/MOM_smartredis_MEKE.F90 b/config_src/external/smartredis/MOM_smartredis_MEKE.F90
new file mode 100644
index 0000000000..0928fc98c2
--- /dev/null
+++ b/config_src/external/smartredis/MOM_smartredis_MEKE.F90
@@ -0,0 +1,61 @@
+!> Contains dummy versions of the routines used to infer MEKE via SmartRedis
+module MOM_smartredis_meke
+
+use MOM_diag_mediator, only : diag_ctrl, time_type
+use MOM_error_handler, only : MOM_error, FATAL, WARNING, is_root_pe
+use MOM_grid, only : ocean_grid_type
+use MOM_file_parser, only : param_file_type
+use MOM_smartredis, only : smartredis_CS_type
+use MOM_unit_scaling, only : unit_scale_type
+use MOM_variables, only : thermo_var_ptrs
+use MOM_verticalGrid, only : verticalGrid_type
+
+implicit none; private
+
+#include <MOM_memory.h>
+
+public smartredis_meke_init, infer_meke
+
+type, public :: smartredis_meke_CS_type; private
+
+end type smartredis_meke_CS_type
+
+contains
+
+!> Initializer for the SmartRedis MEKE module that uses ML to predict eddy kinetic energy
+subroutine smartredis_meke_init(diag, G, US, Time, param_file, smartredis_CS, CS)
+  type(diag_ctrl), target,          intent(inout) :: diag !< Diagnostics structure.
+  type(ocean_grid_type),            intent(inout) :: G    !< The ocean's grid structure.
+  type(unit_scale_type),            intent(in)    :: US   !< A dimensional unit scaling type
+  type(time_type),                  intent(in)    :: Time !< The current model time.
+  type(param_file_type),            intent(in)    :: param_file !< Parameter file parser structure.
+  type(smartredis_CS_type), target, intent(in)    :: smartredis_CS !< SmartRedis client
+  type(smartredis_meke_CS_type),    intent(inout) :: CS   !< Control structure for this module
+
+  call MOM_error(FATAL,"smartredis_meke_init was compiled using the dummy module. Recompile "//&
+                       "with source code from https://github.com/CrayLabs/MOM6-smartredis")
+end subroutine smartredis_meke_init
+
+!> Use the SmartRedis client to call a machine learning model to predict eddy kinetic energy
+subroutine infer_meke(G, GV, US, CS, Time, MEKE, Rd_dx_h, u, v, tv, h, dt)
+  type(ocean_grid_type),         intent(inout) :: G    !< Ocean grid
+  type(verticalGrid_type),       intent(in)    :: GV   !< Ocean vertical grid structure
+  type(unit_scale_type),         intent(in)    :: US   !< A dimensional unit scaling type
+  type(time_type),               intent(in)    :: Time !< The current model time.
+  type(smartredis_meke_CS_type), intent(in)    :: CS   !< Control structure for inferring MEKE
+                                                       !! 
using SmartRedis + real, dimension(SZI_(G),SZJ_(G)), intent( out) :: MEKE !< Vertically averaged eddy kinetic energy [L2 T-2 ~> m2 s-2] + real, dimension(SZI_(G),SZJ_(G)), intent(in ) :: Rd_dx_h !< Rossby radius of deformation over + !! the grid length scale [nondim] + real, dimension(SZIB_(G),SZJ_(G),SZK_(G)), intent(in) :: u !< Zonal velocity [L T-1 ~> m s-1] + real, dimension(SZI_(G),SZJB_(G),SZK_(G)), intent(in) :: v !< Meridional velocity [L T-1 ~> m s-1] + type(thermo_var_ptrs), intent(in) :: tv !< Type containing thermodynamic variables + real, dimension(SZI_(G),SZJ_(G),SZK_(GV)), intent(in) :: h !< Layer thickness [H ~> m or kg m-2]. + real, intent(in) :: dt !< Model(baroclinic) time-step [T ~> s]. + + call MOM_error(FATAL,"infer_meke was compiled using the dummy module. Recompile"//& + "with source code from https://github.com/CrayLabs/MOM6-smartredis") + +end subroutine infer_meke + +end module MOM_smartredis_meke diff --git a/config_src/external/smartredis/smartredis_client.F90 b/config_src/external/smartredis/smartredis_client.F90 new file mode 100644 index 0000000000..30bb991b00 --- /dev/null +++ b/config_src/external/smartredis/smartredis_client.F90 @@ -0,0 +1,925 @@ +module smartredis_client + + use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int + use iso_c_binding, only : c_int8_t, c_int16_t, c_int32_t, c_int64_t, c_float, c_double, c_size_t + use iso_c_binding, only : c_loc, c_f_pointer + + use, intrinsic :: iso_fortran_env, only: stderr => error_unit + + implicit none; private + + integer, parameter, public :: enum_kind = c_int + + !> Dummy type for dataset + type, public :: dataset_type + private + end type dataset_type + + !> Stores all data and methods associated with the SmartRedis client that is used to communicate with the database + type, public :: client_type + private + + logical(kind=c_bool) :: cluster = .false. !< True if a database cluster is being used + type(c_ptr) :: client_ptr = c_null_ptr !< Pointer to the initialized SmartRedisClient + logical :: is_initialized = .false. !< True if client is initialized + contains + + ! 
Public procedures + !> Puts a tensor into the database (overloaded) + generic :: put_tensor => put_tensor_i8, put_tensor_i16, put_tensor_i32, put_tensor_i64, & + put_tensor_float, put_tensor_double + !> Retrieve the tensor in the database into already allocated memory (overloaded) + generic :: unpack_tensor => unpack_tensor_i8, unpack_tensor_i16, unpack_tensor_i32, unpack_tensor_i64, & + unpack_tensor_float, unpack_tensor_double + + !> Decode a response code from an API function + procedure :: SR_error_parser + !> Initializes a new instance of the SmartRedis client + procedure :: initialize => initialize_client + !> Check if a SmartRedis client has been initialized + procedure :: isinitialized + !> Destructs a new instance of the SmartRedis client + procedure :: destructor + !> Check the database for the existence of a specific model + procedure :: model_exists + !> Check the database for the existence of a specific tensor + procedure :: tensor_exists + !> Check the database for the existence of a specific key + procedure :: key_exists + !> Check the database for the existence of a specific dataset + procedure :: dataset_exists + !> Poll the database and return if the model exists + procedure :: poll_model + !> Poll the database and return if the tensor exists + procedure :: poll_tensor + !> Poll the database and return if the datasaet exists + procedure :: poll_dataset + !> Poll the database and return if the key exists + procedure :: poll_key + !> Rename a tensor within the database + procedure :: rename_tensor + !> Delete a tensor from the database + procedure :: delete_tensor + !> Copy a tensor within the database to a new name + procedure :: copy_tensor + !> Set a model from a file + procedure :: set_model_from_file + !> Set a model from a file on a system with multiple GPUs + procedure :: set_model_from_file_multigpu + !> Set a model from a byte string that has been loaded within the application + procedure :: set_model + !> Set a model from a byte string that has been loaded within the application on a system with multiple GPUs + procedure :: set_model_multigpu + !> Retrieve the model as a byte string + procedure :: get_model + !> Set a script from a specified file + procedure :: set_script_from_file + !> Set a script from a specified file on a system with multiple GPUS + procedure :: set_script_from_file_multigpu + !> Set a script as a byte or text string + procedure :: set_script + !> Set a script as a byte or text string on a system with multiple GPUs + procedure :: set_script_multigpu + !> Retrieve the script from the database + procedure :: get_script + !> Run a script that has already been stored in the database + procedure :: run_script + !> Run a script that has already been stored in the database with multiple GPUs + procedure :: run_script_multigpu + !> Run a model that has already been stored in the database + procedure :: run_model + !> Run a model that has already been stored in the database with multiple GPUs + procedure :: run_model_multigpu + !> Remove a script from the database + procedure :: delete_script + !> Remove a script from the database with multiple GPUs + procedure :: delete_script_multigpu + !> Remove a model from the database + procedure :: delete_model + !> Remove a model from the database with multiple GPUs + procedure :: delete_model_multigpu + !> Put a SmartRedis dataset into the database + procedure :: put_dataset + !> Retrieve a SmartRedis dataset from the database + procedure :: get_dataset + !> Rename the dataset within the database + procedure 
:: rename_dataset + !> Copy a dataset stored in the database into another name + procedure :: copy_dataset + !> Delete the dataset from the database + procedure :: delete_dataset + + !> If true, preprend the ensemble id for tensor-related keys + procedure :: use_tensor_ensemble_prefix + !> If true, preprend the ensemble id for model-related keys + procedure :: use_model_ensemble_prefix + !> If true, preprend the ensemble id for dataset list-related keys + procedure :: use_list_ensemble_prefix + !> Specify a specific source of data (e.g. another ensemble member) + procedure :: set_data_source + + !> Append a dataset to a list for aggregation + procedure :: append_to_list + !> Delete an aggregation list + procedure :: delete_list + !> Copy an aggregation list + procedure :: copy_list + !> Rename an existing aggregation list + procedure :: rename_list + !> Retrieve the number of datasets in the list + procedure :: get_list_length + !> Repeatedly check the length of the list until it is a given size + procedure :: poll_list_length + !> Repeatedly check the length of the list until it greater than or equal to the given size + procedure :: poll_list_length_gte + !> Repeatedly check the length of the list until it less than or equal to the given size + procedure :: poll_list_length_lte + !> Retrieve vector of datasetes from the list + procedure :: get_datasets_from_list + + ! Private procedures + procedure, private :: put_tensor_i8 !< Put 8-bit integer tensor into database + procedure, private :: put_tensor_i16 !< Put 16-bit integer tensor into database + procedure, private :: put_tensor_i32 !< Put 32-bit integer tensor into database + procedure, private :: put_tensor_i64 !< Put 64-bit tensor into database + procedure, private :: put_tensor_float !< Put 32-bit real tensor into database + procedure, private :: put_tensor_double !< Put 64-bit real tensor into database + procedure, private :: unpack_tensor_i8 !< Unpack a 8-bit integer tensor into memory + procedure, private :: unpack_tensor_i16 !< Unpack a 16-bit integer tensor into memory + procedure, private :: unpack_tensor_i32 !< Unpack a 32-bit integer tensor into memory + procedure, private :: unpack_tensor_i64 !< Unpack a 64-bit integer tensor into memory + procedure, private :: unpack_tensor_float !< Unpack a 32-bit real tensor into memory + procedure, private :: unpack_tensor_double !< Unpack a 64-bit real tensor into memory + + end type client_type + + contains + + !> Decode a response code from an API function + function SR_error_parser(self, response_code) result(is_error) + class(client_type), intent(in) :: self !< Receives the initialized client + integer (kind=enum_kind), intent(in) :: response_code !< The response code to decode + logical :: is_error !< Indicates whether this is an error response + + is_error = .true. + end function SR_error_parser + + !> Initializes a new instance of a SmartRedis client + function initialize_client(self, cluster) + integer(kind=enum_kind) :: initialize_client + class(client_type), intent(inout) :: self !< Receives the initialized client + logical, optional, intent(in ) :: cluster !< If true, client uses a database cluster (Default: .false.) + + initialize_client = -1 + end function initialize_client + + !> Check whether the client has been initialized + logical function isinitialized(this) + class(client_type) :: this + isinitialized = .false. 
+ end function isinitialized + + !> A destructor for the SmartRedis client + function destructor(self) + integer(kind=enum_kind) :: destructor + class(client_type), intent(inout) :: self + + destructor = -1 + end function destructor + + !> Check if the specified key exists in the database + function key_exists(self, key, exists) + class(client_type), intent(in) :: self !< The client + character(len=*), intent(in) :: key !< The key to check + logical(kind=c_bool), intent(out) :: exists !< Receives whether the key exists + integer(kind=enum_kind) :: key_exists + + key_exists = -1 + end function key_exists + + !> Check if the specified model exists in the database + function model_exists(self, model_name, exists) result(code) + class(client_type), intent(in) :: self !< The client + character(len=*), intent(in) :: model_name !< The model to check + logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists + integer(kind=enum_kind) :: code + + code = -1 + end function model_exists + + !> Check if the specified tensor exists in the database + function tensor_exists(self, tensor_name, exists) result(code) + class(client_type), intent(in) :: self !< The client + character(len=*), intent(in) :: tensor_name !< The tensor to check + logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists + integer(kind=enum_kind) :: code + + code = -1 + end function tensor_exists + + !> Check if the specified dataset exists in the database + function dataset_exists(this, dataset_name, exists) result(code) + class(client_type), intent(in) :: this !< The client + character(len=*), intent(in) :: dataset_name !< The dataset to check + logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists + integer(kind=enum_kind) :: code + + code = -1 + end function dataset_exists + + !> Repeatedly poll the database until the tensor exists or the number of tries is exceeded + function poll_tensor(self, tensor_name, poll_frequency_ms, num_tries, exists) result(code) + class(client_type), intent(in) :: self !< The client + character(len=*), intent(in) :: tensor_name !< name in the database to poll + integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) + integer, intent(in) :: num_tries !< Number of times to poll the database before failing + logical(kind=c_bool), intent(out) :: exists !< Receives whether the tensor exists + integer(kind=enum_kind) :: code + + code = -1 + end function poll_tensor + + !> Repeatedly poll the database until the dataset exists or the number of tries is exceeded + function poll_dataset(self, dataset_name, poll_frequency_ms, num_tries, exists) + integer(kind=enum_kind) :: poll_dataset + class(client_type), intent(in) :: self !< The client + character(len=*), intent(in) :: dataset_name !< Name in the database to poll + integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) + integer, intent(in) :: num_tries !< Number of times to poll the database before failing + logical(kind=c_bool), intent(out) :: exists !< Receives whether the tensor exists + + poll_dataset = -1 + end function poll_dataset + + !> Repeatedly poll the database until the model exists or the number of tries is exceeded + function poll_model(self, model_name, poll_frequency_ms, num_tries, exists) result(code) + class(client_type), intent(in) :: self !< The client + character(len=*), intent(in) :: model_name !< Name in the database to poll + integer, intent(in) :: poll_frequency_ms !< Frequency at which to 
poll the database (ms) + integer, intent(in) :: num_tries !< Number of times to poll the database before failing + logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists + integer(kind=enum_kind) :: code + + code = -1 + end function poll_model + + !> Repeatedly poll the database until the key exists or the number of tries is exceeded + function poll_key(self, key, poll_frequency_ms, num_tries, exists) result(code) + class(client_type), intent(in) :: self !< The client + character(len=*), intent(in) :: key !< Key in the database to poll + integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) + integer, intent(in) :: num_tries !< Number of times to poll the database before failing + logical(kind=c_bool), intent(out) :: exists !< Receives whether the key exists + integer(kind=enum_kind) :: code + + code = -1 + end function poll_key + + !> Put a tensor whose Fortran type is the equivalent 'int8' C-type + function put_tensor_i8(self, name, data, dims) result(code) + integer(kind=c_int8_t), dimension(..), target, intent(in) :: data !< Data to be sent + class(client_type), intent(in) :: self !< Fortran SmartRedis client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer(kind=enum_kind) :: code + + code = -1 + end function put_tensor_i8 + + !> Put a tensor whose Fortran type is the equivalent 'int16' C-type + function put_tensor_i16(self, name, data, dims) result(code) + integer(kind=c_int16_t), dimension(..), target, intent(in) :: data !< Data to be sent + class(client_type), intent(in) :: self !< Fortran SmartRedis client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer(kind=enum_kind) :: code + + code = -1 + end function put_tensor_i16 + + !> Put a tensor whose Fortran type is the equivalent 'int32' C-type + function put_tensor_i32(self, name, data, dims) result(code) + integer(kind=c_int32_t), dimension(..), target, intent(in) :: data !< Data to be sent + class(client_type), intent(in) :: self !< Fortran SmartRedis client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer(kind=enum_kind) :: code + + code = -1 + end function put_tensor_i32 + + !> Put a tensor whose Fortran type is the equivalent 'int64' C-type + function put_tensor_i64(self, name, data, dims) result(code) + integer(kind=c_int64_t), dimension(..), target, intent(in) :: data !< Data to be sent + class(client_type), intent(in) :: self !< Fortran SmartRedis client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer(kind=enum_kind) :: code + + code = -1 + end function put_tensor_i64 + + !> Put a tensor whose Fortran type is the equivalent 'float' C-type + function put_tensor_float(self, name, data, dims) result(code) + real(kind=c_float), dimension(..), target, intent(in) :: data !< Data to be sent + class(client_type), intent(in) :: self !< Fortran SmartRedis client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer(kind=enum_kind) :: code + + code = -1 + end function 
put_tensor_float + + !> Put a tensor whose Fortran type is the equivalent 'double' C-type + function put_tensor_double(self, name, data, dims) result(code) + real(kind=c_double), dimension(..), target, intent(in) :: data !< Data to be sent + class(client_type), intent(in) :: self !< Fortran SmartRedis client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer(kind=enum_kind) :: code + + code = -1 + end function put_tensor_double + + !> Put a tensor whose Fortran type is the equivalent 'int8' C-type + function unpack_tensor_i8(self, name, result, dims) result(code) + integer(kind=c_int8_t), dimension(..), target, intent(out) :: result !< Data to be sent + class(client_type), intent(in) :: self !< Pointer to the initialized client + character(len=*), intent(in) :: name !< The name to use to place the tensor + integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor + integer(kind=enum_kind) :: code + + code = -1 + end function unpack_tensor_i8 + + !> Put a tensor whose Fortran type is the equivalent 'int16' C-type + function unpack_tensor_i16(self, name, result, dims) result(code) + integer(kind=c_int16_t), dimension(..), target, intent(out) :: result !< Data to be sent + class(client_type), intent(in) :: self !< Pointer to the initialized client + character(len=*), intent(in) :: name !< The name to use to place the tensor + integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor + integer(kind=enum_kind) :: code + + code = -1 + end function unpack_tensor_i16 + + !> Put a tensor whose Fortran type is the equivalent 'int32' C-type + function unpack_tensor_i32(self, name, result, dims) result(code) + integer(kind=c_int32_t), dimension(..), target, intent(out) :: result !< Data to be sent + class(client_type), intent(in) :: self !< Pointer to the initialized client + character(len=*), intent(in) :: name !< The name to use to place the tensor + integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor + integer(kind=enum_kind) :: code + + code = -1 + end function unpack_tensor_i32 + + !> Put a tensor whose Fortran type is the equivalent 'int64' C-type + function unpack_tensor_i64(self, name, result, dims) result(code) + integer(kind=c_int64_t), dimension(..), target, intent(out) :: result !< Data to be sent + class(client_type), intent(in) :: self !< Pointer to the initialized client + character(len=*), intent(in) :: name !< The name to use to place the tensor + integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor + integer(kind=enum_kind) :: code + + code = -1 + end function unpack_tensor_i64 + + !> Put a tensor whose Fortran type is the equivalent 'float' C-type + function unpack_tensor_float(self, name, result, dims) result(code) + real(kind=c_float), dimension(..), target, intent(out) :: result !< Data to be sent + class(client_type), intent(in) :: self !< Pointer to the initialized client + character(len=*), intent(in) :: name !< The name to use to place the tensor + integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor + integer(kind=enum_kind) :: code + + code = -1 + end function unpack_tensor_float + + !> Put a tensor whose Fortran type is the equivalent 'double' C-type + function unpack_tensor_double(self, name, result, dims) result(code) + real(kind=c_double), dimension(..), target, intent(out) :: result !< Data 
to be sent + class(client_type), intent(in) :: self !< Pointer to the initialized client + character(len=*), intent(in) :: name !< The name to use to place the tensor + integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor + integer(kind=enum_kind) :: code + + code = -1 + end function unpack_tensor_double + + !> Move a tensor to a new name + function rename_tensor(self, old_name, new_name) result(code) + class(client_type), intent(in) :: self !< The initialized Fortran SmartRedis client + character(len=*), intent(in) :: old_name !< The current name for the tensor + !! excluding null terminating character + character(len=*), intent(in) :: new_name !< The new tensor name + integer(kind=enum_kind) :: code + + code = -1 + end function rename_tensor + + !> Delete a tensor + function delete_tensor(self, name) result(code) + class(client_type), intent(in) :: self !< The initialized Fortran SmartRedis client + character(len=*), intent(in) :: name !< The name associated with the tensor + integer(kind=enum_kind) :: code + + code = -1 + end function delete_tensor + + !> Copy a tensor to the destination name + function copy_tensor(self, src_name, dest_name) result(code) + class(client_type), intent(in) :: self !< The initialized Fortran SmartRedis client + character(len=*), intent(in) :: src_name !< The name associated with the tensor + !! excluding null terminating character + character(len=*), intent(in) :: dest_name !< The new tensor name + integer(kind=enum_kind) :: code + + code = -1 + end function copy_tensor + + !> Retrieve the model from the database + function get_model(self, name, model) result(code) + class(client_type), intent(in ) :: self !< An initialized SmartRedis client + character(len=*), intent(in ) :: name !< The name associated with the model + character(len=*), intent( out) :: model !< The model as a continuous buffer + integer(kind=enum_kind) :: code + + code = -1 + end function get_model + + !> Load the machine learning model from a file and set the configuration + function set_model_from_file(self, name, model_file, backend, device, batch_size, min_batch_size, tag, & + inputs, outputs) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the model + character(len=*), intent(in) :: model_file !< The file storing the model + character(len=*), intent(in) :: backend !< The name of the backend + !! (TF, TFLITE, TORCH, ONNX) + character(len=*), intent(in) :: device !< The name of the device + !! (CPU, GPU, GPU:0, GPU:1...) + integer, optional, intent(in) :: batch_size !< The batch size for model execution + integer, optional, intent(in) :: min_batch_size !< The minimum batch size for model execution + character(len=*), optional, intent(in) :: tag !< A tag to attach to the model for + !! information purposes + character(len=*), dimension(:), optional, intent(in) :: inputs !< One or more names of model + !! input nodes (TF models) + character(len=*), dimension(:), optional, intent(in) :: outputs !< One or more names of model + !! 
output nodes (TF models) + integer(kind=enum_kind) :: code + + code = -1 + end function set_model_from_file + + !> Load the machine learning model from a file and set the configuration for use in multi-GPU systems + function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu, num_gpus, batch_size, & + min_batch_size, tag, inputs, outputs) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the model + character(len=*), intent(in) :: model_file !< The file storing the model + character(len=*), intent(in) :: backend !< The name of the backend + !! (TF, TFLITE, TORCH, ONNX) + integer, intent(in) :: first_gpu !< The first GPU (zero-based) + !! to use with the model + integer, intent(in) :: num_gpus !< The number of GPUs to use with the model + integer, optional, intent(in) :: batch_size !< The batch size for model execution + integer, optional, intent(in) :: min_batch_size !< The minimum batch size for model execution + character(len=*), optional, intent(in) :: tag !< A tag to attach to the model for + !! information purposes + character(len=*), dimension(:), optional, intent(in) :: inputs !< One or more names of model + !! input nodes (TF models) + character(len=*), dimension(:), optional, intent(in) :: outputs !< One or more names of model + !! output nodes (TF models) + integer(kind=enum_kind) :: code + + code = -1 + end function set_model_from_file_multigpu + + !> Establish a model to run + function set_model(self, name, model, backend, device, batch_size, min_batch_size, tag, & + inputs, outputs) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the model + character(len=*), intent(in) :: model !< The binary representation of the model + character(len=*), intent(in) :: backend !< The name of the backend (TF, TFLITE, TORCH, ONNX) + character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) + integer, intent(in) :: batch_size !< The batch size for model execution + integer, intent(in) :: min_batch_size !< The minimum batch size for model execution + character(len=*), intent(in) :: tag !< A tag to attach to the model for + !! 
information purposes + character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) + character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) + integer(kind=enum_kind) :: code + + code = -1 + end function set_model + + !> Set a model from a byte string to run on a system with multiple GPUs + function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, batch_size, min_batch_size, tag, & + inputs, outputs) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the model + character(len=*), intent(in) :: model !< The binary representation of the model + character(len=*), intent(in) :: backend !< The name of the backend (TF, TFLITE, TORCH, ONNX) + integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model + integer, intent(in) :: num_gpus !< The number of GPUs to use with the model + integer, intent(in) :: batch_size !< The batch size for model execution + integer, intent(in) :: min_batch_size !< The minimum batch size for model execution + character(len=*), intent(in) :: tag !< A tag to attach to the model for + !! information purposes + character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) + character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) + integer(kind=enum_kind) :: code + + code = -1 + end function set_model_multigpu + + !> Run a model in the database using the specified input and output tensors + function run_model(self, name, inputs, outputs) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the model + character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) + character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) + integer(kind=enum_kind) :: code + + code = -1 + end function run_model + + !> Run a model in the database using the specified input and output tensors in a multi-GPU system + function run_model_multigpu(self, name, inputs, outputs, offset, first_gpu, num_gpus) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the model + character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) + character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) + integer, intent(in) :: offset !< Index of the current image, such as a processor ID + !! 
or MPI rank + integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model + integer, intent(in) :: num_gpus !< The number of GPUs to use with the model + integer(kind=enum_kind) :: code + + code = -1 + end function run_model_multigpu + + !> Remove a model from the database + function delete_model(self, name) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to remove the model + integer(kind=enum_kind) :: code + + code = -1 + end function delete_model + + !> Remove a model from the database + function delete_model_multigpu(self, name, first_gpu, num_gpus) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to remove the model + integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model + integer, intent(in) :: num_gpus !< The number of GPUs to use with the model + integer(kind=enum_kind) :: code + + code = -1 + end function delete_model_multigpu + + !> Retrieve the script from the database + function get_script(self, name, script) result(code) + class(client_type), intent(in ) :: self !< An initialized SmartRedis client + character(len=*), intent(in ) :: name !< The name to use to place the script + character(len=*), intent( out) :: script !< The script as a continuous buffer + integer(kind=enum_kind) :: code + + code = -1 + end function get_script + + !> Set a script (from file) in the database for future execution + function set_script_from_file(self, name, device, script_file) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the script + character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) + character(len=*), intent(in) :: script_file !< The file storing the script + integer(kind=enum_kind) :: code + + code = -1 + end function set_script_from_file + + !> Set a script (from file) in the database for future execution in a multi-GPU system + function set_script_from_file_multigpu(self, name, script_file, first_gpu, num_gpus) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the script + character(len=*), intent(in) :: script_file !< The file storing the script + integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model + integer, intent(in) :: num_gpus !< The number of GPUs to use with the model + integer(kind=enum_kind) :: code + + code = -1 + end function set_script_from_file_multigpu + + !> Set a script (from buffer) in the database for future execution + function set_script(self, name, device, script) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the script + character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) 
+ character(len=*), intent(in) :: script !< The file storing the script + integer(kind=enum_kind) :: code + + code = -1 + end function set_script + + !> Set a script (from buffer) in the database for future execution in a multi-GPU system + function set_script_multigpu(self, name, script, first_gpu, num_gpus) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the script + character(len=*), intent(in) :: script !< The file storing the script + integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model + integer, intent(in) :: num_gpus !< The number of GPUs to use with the model + integer(kind=enum_kind) :: code + + code = -1 + end function set_script_multigpu + + function run_script(self, name, func, inputs, outputs) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the script + character(len=*), intent(in) :: func !< The name of the function in the script to call + character(len=*), dimension(:), intent(in) :: inputs !< One or more names of script + !! input nodes (TF scripts) + character(len=*), dimension(:), intent(in) :: outputs !< One or more names of script + !! output nodes (TF scripts) + integer(kind=enum_kind) :: code + + code = -1 + end function run_script + + function run_script_multigpu(self, name, func, inputs, outputs, offset, first_gpu, num_gpus) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to place the script + character(len=*), intent(in) :: func !< The name of the function in the script to call + character(len=*), dimension(:), intent(in) :: inputs !< One or more names of script + !! input nodes (TF scripts) + character(len=*), dimension(:), intent(in) :: outputs !< One or more names of script + !! output nodes (TF scripts) + integer, intent(in) :: offset !< Index of the current image, such as a processor ID + !! 
or MPI rank + integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model + integer, intent(in) :: num_gpus !< The number of GPUs to use with the model + integer(kind=enum_kind) :: code + + code = -1 + end function run_script_multigpu + + !> Remove a script from the database + function delete_script(self, name) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to delete the script + integer(kind=enum_kind) :: code + + code = -1 + end function delete_script + + !> Remove a script_multigpu from the database + function delete_script_multigpu(self, name, first_gpu, num_gpus) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< The name to use to delete the script_multigpu + integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model + integer, intent(in) :: num_gpus !< The number of GPUs to use with the model + integer(kind=enum_kind) :: code + + code = -1 + end function delete_script_multigpu + + !> Store a dataset in the database + function put_dataset(self, dataset) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + type(dataset_type), intent(in) :: dataset !< Dataset to store in the dataset + integer(kind=enum_kind) :: code + + code = -1 + end function put_dataset + + !> Retrieve a dataset from the database + function get_dataset(self, name, dataset) result(code) + class(client_type), intent(in ) :: self !< An initialized SmartRedis client + character(len=*), intent(in ) :: name !< Name of the dataset to get + type(dataset_type), intent( out) :: dataset !< receives the dataset + integer(kind=enum_kind) :: code + + code = -1 + end function get_dataset + + !> Rename a dataset stored in the database + function rename_dataset(self, name, new_name) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< Original name of the dataset + character(len=*), intent(in) :: new_name !< New name of the dataset + integer(kind=enum_kind) :: code + + code = -1 + end function rename_dataset + + !> Copy a dataset within the database to a new name + function copy_dataset(self, name, new_name) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< Source name of the dataset + character(len=*), intent(in) :: new_name !< Name of the new dataset + integer(kind=enum_kind) :: code + + code = -1 + end function copy_dataset + + !> Delete a dataset stored within a database + function delete_dataset(self, name) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: name !< Name of the dataset to delete + integer(kind=enum_kind) :: code + + code = -1 + end function delete_dataset + + !> Set the data source (i.e. name prefix for get functions) + function set_data_source(self, source_id) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: source_id !< The name prefix + integer(kind=enum_kind) :: code + + code = -1 + end function set_data_source + + !> Set whether names of model and script entities should be prefixed (e.g. in an ensemble) to form database names. + !! 
Prefixes will only be used if they were previously set through the environment variables SSKEYOUT and SSKEYIN. + !! Keys of entities created before client function is called will not be affected. By default, the client does not + !! prefix model and script names. + function use_model_ensemble_prefix(self, use_prefix) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + logical, intent(in) :: use_prefix !< The prefix setting + integer(kind=enum_kind) :: code + + code = -1 + end function use_model_ensemble_prefix + + + !> Set whether names of tensor and dataset entities should be prefixed (e.g. in an ensemble) to form database keys. + !! Prefixes will only be used if they were previously set through the environment variables SSKEYOUT and SSKEYIN. + !! Keys of entities created before client function is called will not be affected. By default, the client prefixes + !! tensor and dataset keys with the first prefix specified with the SSKEYIN and SSKEYOUT environment variables. + function use_tensor_ensemble_prefix(self, use_prefix) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + logical, intent(in) :: use_prefix !< The prefix setting + integer(kind=enum_kind) :: code + + code = -1 + end function use_tensor_ensemble_prefix + + !> Control whether aggregation lists are prefixed + function use_list_ensemble_prefix(self, use_prefix) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + logical, intent(in) :: use_prefix !< The prefix setting + integer(kind=enum_kind) :: code + + code = -1 + end function use_list_ensemble_prefix + + !> Appends a dataset to the aggregation list When appending a dataset to an aggregation list, the list will + !! automatically be created if it does not exist (i.e. this is the first entry in the list). Aggregation + !! lists work by referencing the dataset by storing its key, so appending a dataset to an aggregation list + !! does not create a copy of the dataset. Also, for this reason, the dataset must have been previously + !! placed into the database with a separate call to put_dataset(). 
+ function append_to_list(self, list_name, dataset) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: list_name !< Name of the dataset to get + type(dataset_type), intent(in) :: dataset !< Dataset to append to the list + integer(kind=enum_kind) :: code + + integer(kind=c_size_t) :: list_name_length + character(kind=c_char,len=len_trim(list_name)) :: list_name_c + + list_name_c = trim(list_name) + list_name_length = len_trim(list_name) + code = -1 + end function append_to_list + + !> Delete an aggregation list + function delete_list(self, list_name) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: list_name !< Name of the aggregated dataset list to delete + integer(kind=enum_kind) :: code + + integer(kind=c_size_t) :: list_name_length + character(kind=c_char,len=len_trim(list_name)) :: list_name_c + + list_name_c = trim(list_name) + list_name_length = len_trim(list_name) + + code = -1 + end function delete_list + + !> Copy an aggregation list + function copy_list(self, src_name, dest_name) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: src_name !< Name of the dataset to copy + character(len=*), intent(in) :: dest_name !< The new list name + integer(kind=enum_kind) :: code + + code = -1 + end function copy_list + + !> Rename an aggregation list + function rename_list(self, src_name, dest_name) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: src_name !< Name of the dataset to rename + character(len=*), intent(in) :: dest_name !< The new list name + integer(kind=enum_kind) :: code + + code = -1 + end function rename_list + + !> Get the length of the aggregation list + function get_list_length(self, list_name, result_length) result(code) + class(client_type), intent(in ) :: self !< An initialized SmartRedis client + character(len=*), intent(in ) :: list_name !< Name of the dataset to get + integer, intent( out) :: result_length !< The length of the list + integer(kind=enum_kind) :: code + + code = -1 + end function get_list_length + + !> Get the length of the aggregation list + function poll_list_length(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code) + class(client_type), intent(in ) :: self !< An initialized SmartRedis client + character(len=*), intent(in ) :: list_name !< Name of the dataset to get + integer, intent(in ) :: list_length !< The desired length of the list + integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) + integer, intent(in ) :: num_tries !< Number of times to poll the database before failing + logical(kind=c_bool), intent( out) :: poll_result !< True if the list is the requested length, + !! False if not after num_tries. 
+ integer(kind=enum_kind) :: code + + code = -1 + end function poll_list_length + + !> Get the length of the aggregation list + function poll_list_length_gte(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code) + class(client_type), intent(in ) :: self !< An initialized SmartRedis client + character(len=*), intent(in ) :: list_name !< Name of the dataset to get + integer, intent(in ) :: list_length !< The desired length of the list + integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) + integer, intent(in ) :: num_tries !< Number of times to poll the database before failing + logical(kind=c_bool), intent( out) :: poll_result !< True if the list is the requested length, + !! False if not after num_tries. + integer(kind=enum_kind) :: code + + code = -1 + end function poll_list_length_gte + + !> Get the length of the aggregation list + function poll_list_length_lte(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: list_name !< Name of the dataset to get + integer, intent(in) :: list_length !< The desired length of the list + integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) + integer, intent(in) :: num_tries !< Number of times to poll the database before failing + logical(kind=c_bool), intent( out) :: poll_result !< True if the list is the requested length, + !! False if not after num_tries. + + integer(kind=enum_kind) :: code + + code = -1 + end function poll_list_length_lte + + !> Get datasets from an aggregation list. Note that this will deallocate an existing list. + !! NOTE: This potentially be less performant than get_datasets_from_list_range due to an + !! extra query to the database to get the list length. This is for now necessary because + !! difficulties in allocating memory for Fortran alloctables from within C. + function get_datasets_from_list(self, list_name, datasets, num_datasets) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: list_name !< Name of the dataset to get + type(dataset_type), dimension(:), allocatable, intent( out) :: datasets !< The array of datasets included + integer(kind=enum_kind) :: code + !! in the list + integer, intent(out) :: num_datasets !< The numbr of datasets returned + + code = -1 + end function get_datasets_from_list + + !> Get datasets from an aggregation list over a given range by index. Note that this will deallocate an existing list + function get_datasets_from_list_range(self, list_name, start_index, end_index, datasets) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + character(len=*), intent(in) :: list_name !< Name of the dataset to get + integer, intent(in) :: start_index !< The starting index of the range (inclusive, + !! starting at zero). Negative values are + !! supported. A negative value indicates offsets + !! starting at the end of the list. For example, -1 is + !! the last element of the list. + integer, intent(in) :: end_index !< The ending index of the range (inclusive, + !! starting at zero). Negative values are + !! supported. A negative value indicates offsets + !! starting at the end of the list. For example, -1 is + !! the last element of the list. 
+ + type(dataset_type), dimension(:), allocatable, intent( out) :: datasets !< The array of datasets included + integer(kind=enum_kind) :: code + !! in the list + + code = -1 + end function get_datasets_from_list_range + + end module smartredis_client + diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index 4cb438c2c3..4cb6696fc5 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -143,6 +143,9 @@ module MOM use MOM_porous_barriers, only : porous_widths +! SmartRedis machine-learning interface +use MOM_smartredis, only : smartredis_CS_type, smartredis_init, client_type + ! ODA modules use MOM_oda_driver_mod, only : ODA_CS, oda, init_oda, oda_end use MOM_oda_driver_mod, only : set_prior_tracer, set_analysis_time, apply_oda_tracer_increments @@ -251,6 +254,8 @@ module MOM logical :: offline_tracer_mode = .false. !< If true, step_offline() is called instead of step_MOM(). !! This is intended for running MOM6 in offline tracer mode + logical :: MEKE_in_dynamics !< If .true. (default), MEKE is called in the dynamics routine otherwise + !! it is called during the tracer dynamics type(time_type), pointer :: Time !< pointer to the ocean clock real :: dt !< (baroclinic) dynamics time step [T ~> s] @@ -404,15 +409,14 @@ module MOM type(ODA_CS), pointer :: odaCS => NULL() !< a pointer to the control structure for handling !! ensemble model state vectors and data assimilation !! increments and priors + type(smartredis_CS_type) :: smartredis_CS !< SmartRedis control structure for online ML/AI type(porous_barrier_ptrs) :: pbv !< porous barrier fractional cell metrics - real ALLOCABLE_, dimension(NIMEMB_PTR_,NJMEM_,NKMEM_) & - :: por_face_areaU !< fractional open area of U-faces [nondim] - real ALLOCABLE_, dimension(NIMEM_,NJMEMB_PTR_,NKMEM_) & - :: por_face_areaV !< fractional open area of V-faces [nondim] - real ALLOCABLE_, dimension(NIMEMB_PTR_,NJMEM_,NK_INTERFACE_) & - :: por_layer_widthU !< fractional open width of U-faces [nondim] - real ALLOCABLE_, dimension(NIMEM_,NJMEMB_PTR_,NK_INTERFACE_) & - :: por_layer_widthV !< fractional open width of V-faces [nondim] + real ALLOCABLE_, dimension(NIMEMB_PTR_,NJMEM_,NKMEM_) :: por_face_areaU !< fractional open area of U-faces [nondim] + real ALLOCABLE_, dimension(NIMEM_,NJMEMB_PTR_,NKMEM_) :: por_face_areaV !< fractional open area of V-faces [nondim] + real ALLOCABLE_, dimension(NIMEMB_PTR_,NJMEM_,NK_INTERFACE_) :: por_layer_widthU !< fractional open width + !! of U-faces [nondim] + real ALLOCABLE_, dimension(NIMEM_,NJMEMB_PTR_,NK_INTERFACE_) :: por_layer_widthV !< fractional open width + !! of V-faces [nondim] type(particles), pointer :: particles => NULL() ! NULL() !< a pointer to the stochastics control structure end type MOM_control_struct @@ -1214,8 +1218,11 @@ subroutine step_MOM_dynamics(forces, p_surf_begin, p_surf_end, dt, dt_thermo, & ! for vertical remapping may need to be regenerated. call diag_update_remap_grids(CS%diag) - if (CS%useMEKE) call step_forward_MEKE(CS%MEKE, h, CS%VarMix%SN_u, CS%VarMix%SN_v, & - CS%visc, dt, G, GV, US, CS%MEKE_CSp, CS%uhtr, CS%vhtr, Time_local) + if (CS%useMEKE .and. CS%MEKE_in_dynamics) then + call step_forward_MEKE(CS%MEKE, h, CS%VarMix%SN_u, CS%VarMix%SN_v, & + CS%visc, dt, G, GV, US, CS%MEKE_CSp, CS%uhtr, CS%vhtr, & + CS%u, CS%v, CS%tv, Time_local) + endif call disable_averaging(CS%diag) ! Advance the dynamics time by dt. 
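[Editor's aside, not part of the patch: a minimal, self-contained sketch of how a caller might exercise the client_type interface declared in the dummy smartredis_client module above. The tensor and model names ("meke_features", "meke_nn", "meke_eke") and array sizes are hypothetical, and with the stub module every call simply returns an error code; the real client from https://github.com/CrayLabs/MOM6-smartredis is needed for these calls to do actual work.]

program smartredis_client_sketch
  use iso_c_binding,     only : c_double
  use smartredis_client, only : client_type, enum_kind
  implicit none

  type(client_type) :: client
  integer(kind=enum_kind) :: rc
  real(kind=c_double), target :: features(8), eke(1)  ! placeholder input/output tensors

  features(:) = 0.0_c_double

  ! Connect to a single-shard (non-clustered) database and check the response code
  rc = client%initialize(cluster=.false.)
  if (client%SR_error_parser(rc)) stop "SmartRedis client failed to initialize"

  ! Stage the inputs, run a model already stored in the database, and retrieve the prediction
  rc = client%put_tensor("meke_features", features, shape(features))
  rc = client%run_model("meke_nn", inputs=[character(len=16) :: "meke_features"], &
                        outputs=[character(len=16) :: "meke_eke"])
  rc = client%unpack_tensor("meke_eke", eke, shape(eke))
end program smartredis_client_sketch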
@@ -1320,6 +1327,12 @@ subroutine step_MOM_tracer_dyn(CS, G, GV, US, h, Time_local) CS%t_dyn_rel_adv = 0.0 call cpu_clock_end(id_clock_tracer) ; call cpu_clock_end(id_clock_thermo) + if (CS%useMEKE .and. (.not. CS%MEKE_in_dynamics)) then + call step_forward_MEKE(CS%MEKE, h, CS%VarMix%SN_u, CS%VarMix%SN_v, & + CS%visc, CS%t_dyn_rel_adv, G, GV, US, CS%MEKE_CSp, CS%uhtr, CS%vhtr, & + CS%u, CS%v, CS%tv, Time_local) + endif + if (associated(CS%tv%T)) then call extract_diabatic_member(CS%diabatic_CSp, diabatic_halo=halo_sz) if (halo_sz > 0) then @@ -1799,7 +1812,7 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & ! This include declares and sets the variable "version". # include "version_variable.h" - integer :: i, j, is, ie, js, je, isd, ied, jsd, jed, nz + integer :: i, j, k, is, ie, js, je, isd, ied, jsd, jed, nz integer :: IsdB, IedB, JsdB, JedB real :: dtbt ! If negative, this specifies the barotropic timestep as a fraction ! of the maximum stable value [nondim]. @@ -2794,7 +2807,9 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & endif call cpu_clock_end(id_clock_MOM_init) - CS%useMEKE = MEKE_init(Time, G, US, param_file, diag, CS%MEKE_CSp, CS%MEKE, restart_CSp) + call smartredis_init(param_file, CS%smartredis_CS) + CS%useMEKE = MEKE_init(Time, G, US, param_file, diag, CS%smartredis_CS, CS%MEKE_CSp, CS%MEKE, & + restart_CSp, CS%MEKE_in_dynamics) call VarMix_init(Time, G, GV, US, param_file, diag, CS%VarMix) call set_visc_init(Time, G, GV, US, param_file, diag, CS%visc, CS%set_visc_CSp, restart_CSp, CS%OBC) diff --git a/src/framework/MOM_smartredis.F90 b/src/framework/MOM_smartredis.F90 new file mode 100644 index 0000000000..f9db54dd3c --- /dev/null +++ b/src/framework/MOM_smartredis.F90 @@ -0,0 +1,78 @@ +!> Contains routines necessary to initialize the SmartRedis client +module MOM_smartredis + +use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end, CLOCK_ROUTINE +use MOM_error_handler, only : MOM_error, FATAL, WARNING, NOTE, MOM_mesg, is_root_pe +use MOM_file_parser, only : read_param, get_param, log_version, param_file_type +use smartredis_client, only : client_type + +implicit none; private + +!> Control structure to store SmartRedis client related parameters and objects +type, public :: smartredis_CS_type + type(client_type) :: client !< The SmartRedis client itself + logical :: use_smartredis !< If True, use SmartRedis within MOM6 + logical :: colocated !< If True, the orchestrator was setup in 'co-located' mode + logical :: cluster !< If True, the orchestrator has three shards or more + integer :: colocated_stride !< Sets which ranks will load the model from the file + !! e.g. mod(rank,colocated_stride) == 0 +end type + +public :: client_type +public :: smartredis_init + +contains + +subroutine smartredis_init(param_file, CS, client_in) + type(param_file_type), intent(in ) :: param_file !< Parameter file structure + type(smartredis_CS_type), intent(inout) :: CS !< Control structure for SmartRedis + type(client_type), optional, intent(in ) :: client_in !< If present, use a previously initialized + !! SmartRedis client + + character(len=40) :: mdl = "MOM_SMARTREDIS" + integer :: id_client_init + integer :: return_code + call get_param(param_file, mdl, "USE_SMARTREDIS", CS%use_smartredis, & + "If true, use the data client to connect"//& + "with the SmartRedis database", default=.false.) + + if (present(client_in)) then ! The driver (e.g. 
the NUOPC cap) has already initialized the client + + CS%client = client_in + + if (.not. CS%client%isinitialized() .and. CS%use_smartredis) then + call MOM_error(FATAL, & + "If using a SmartRedis client not initialized within MOM, client%initialize must have already been invoked."//& + " Check that the client has been initialized in the driver before the call to initialize_MOM") + endif + + elseif (CS%use_smartredis) then ! The client will be initialized within MOM + + call get_param(param_file, mdl, "SMARTREDIS_COLOCATED", CS%colocated, & + "If true, the SmartRedis database is colocated on the simulation nodes.",& + default=.false.) + if (CS%colocated) then + CS%cluster = .false. + call get_param(param_file, mdl, "SMARTREDIS_COLOCATED_STRIDE", CS%colocated_stride, & + "If true, the SmartRedis database is colocated on the simulation nodes.",& + default=0) + else + call get_param(param_file, mdl, "SMARTREDIS_CLUSTER", CS%cluster, & + "If true, the SmartRedis database is distributed over multiple nodes.",& + default=.true.) + endif + id_client_init = cpu_clock_id('(SMARTREDIS client init)', grain=CLOCK_ROUTINE) + call MOM_error(NOTE,"SmartRedis Client Initializing") + call cpu_clock_begin(id_client_init) + return_code = CS%client%initialize(CS%cluster) + if (CS%client%SR_error_parser(return_code)) then + call MOM_error(FATAL, "SmartRedis client failed to initialize") + endif + call MOM_error(NOTE,"SmartRedis Client Initialized") + call cpu_clock_end(id_client_init) + + endif +end subroutine smartredis_init + +end module MOM_smartredis + diff --git a/src/parameterizations/lateral/MOM_MEKE.F90 b/src/parameterizations/lateral/MOM_MEKE.F90 index 3aa700d05c..f5f542a40a 100644 --- a/src/parameterizations/lateral/MOM_MEKE.F90 +++ b/src/parameterizations/lateral/MOM_MEKE.F90 @@ -5,24 +5,25 @@ module MOM_MEKE ! This file is part of MOM6. See LICENSE.md for the license. 
-use MOM_debugging, only : hchksum, uvchksum -use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end, CLOCK_ROUTINE -use MOM_diag_mediator, only : post_data, register_diag_field, safe_alloc_ptr -use MOM_diag_mediator, only : diag_ctrl, time_type -use MOM_domains, only : create_group_pass, do_group_pass, group_pass_type -use MOM_error_handler, only : MOM_error, FATAL, WARNING, NOTE, MOM_mesg -use MOM_file_parser, only : read_param, get_param, log_version, param_file_type -use MOM_grid, only : ocean_grid_type -use MOM_hor_index, only : hor_index_type -<<<<<<< HEAD -======= -use MOM_io, only : vardesc, var_desc, slasher ->>>>>>> 0986bc3ca (Add option to read EKE via file) -use MOM_restart, only : MOM_restart_CS, register_restart_field, query_initialized -use MOM_unit_scaling, only : unit_scale_type -use MOM_variables, only : vertvisc_type -use MOM_verticalGrid, only : verticalGrid_type -use MOM_MEKE_types, only : MEKE_type +use MOM_debugging, only : hchksum, uvchksum +use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end, CLOCK_ROUTINE +use MOM_diag_mediator, only : post_data, register_diag_field, safe_alloc_ptr +use MOM_diag_mediator, only : diag_ctrl, time_type +use MOM_domains, only : create_group_pass, do_group_pass, group_pass_type +use MOM_domains, only : pass_vector +use MOM_error_handler, only : MOM_error, FATAL, WARNING, NOTE, MOM_mesg +use MOM_file_parser, only : read_param, get_param, log_version, param_file_type +use MOM_grid, only : ocean_grid_type +use MOM_hor_index, only : hor_index_type +use MOM_io, only : vardesc, var_desc, slasher +use MOM_smartredis_MEKE, only : smartredis_meke_CS_type, smartredis_meke_init, infer_meke +use MOM_restart, only : MOM_restart_CS, register_restart_field, query_initialized +use MOM_smartredis, only : client_type, smartredis_CS_type +use MOM_string_functions, only : lowercase +use MOM_unit_scaling, only : unit_scale_type +use MOM_variables, only : vertvisc_type, thermo_var_ptrs +use MOM_verticalGrid, only : verticalGrid_type +use MOM_MEKE_types, only : MEKE_type use time_interp_external_mod, only : init_external_field, time_interp_external use time_interp_external_mod, only : time_interp_external_init @@ -33,6 +34,13 @@ module MOM_MEKE public step_forward_MEKE, MEKE_init, MEKE_alloc_register_restart, MEKE_end +!> Private enum to define the source of the EKE used in MEKE +enum, bind(c) + enumerator :: EKE_PROG !< Use prognostic equation to calcualte EKE + enumerator :: EKE_FILE !< Read in EKE from a file + enumerator :: EKE_SMARTREDIS !< Infer EKE using a neural network +end enum + !> Control structure that contains MEKE parameters and diagnostics handles type, public :: MEKE_CS ; private logical :: initialized = .false. !< True if this control structure has been initialized. @@ -50,7 +58,6 @@ module MOM_MEKE logical :: visc_drag !< If true use the vertvisc_type to calculate bottom drag. logical :: MEKE_GEOMETRIC !< If true, uses the GM coefficient formulation from the GEOMETRIC !! framework (Marshall et al., 2012) - logical :: MEKE_from_file !< If true, reads EKE from a netCDF file real :: MEKE_GEOMETRIC_alpha !< The nondimensional coefficient governing the efficiency of the !! GEOMETRIC thickness diffusion. logical :: MEKE_equilibrium_alt !< If true, use an alternative calculation for the @@ -98,9 +105,8 @@ module MOM_MEKE logical :: kh_flux_enabled !< If true, lateral diffusive MEKE flux is enabled. logical :: initialize !< If True, invokes a steady state solver to calculate MEKE. 
logical :: debug !< If true, write out checksums of data for debugging - character(len=200) :: inputdir !< directory where NetCDF input files are - character(len=200) :: eke_file !< filename for eke data - character(len=30) :: eke_var_name !< name of variable in ncfile + integer :: eke_src !< Enum specifying whether EKE is stepped forward prognostically (default), + !! read in from a file, or inferred via a neural network type(diag_ctrl), pointer :: diag => NULL() !< A type that regulates diagnostics output !>@{ Diagnostic handles integer :: id_MEKE = -1, id_Ue = -1, id_Kh = -1, id_src = -1 @@ -111,19 +117,22 @@ module MOM_MEKE integer :: id_Lrhines = -1, id_Leady = -1 integer :: id_MEKE_equilibrium = -1 !>@} - integer :: id_eke = -1 + integer :: id_eke = -1 !< Handle for reading in EKE from a file ! Infrastructure integer :: id_clock_pass !< Clock for group pass calls type(group_pass_type) :: pass_MEKE !< Group halo pass handle for MEKE%MEKE and maybe MEKE%Kh_diff type(group_pass_type) :: pass_Kh !< Group halo pass handle for MEKE%Kh, MEKE%Ku, and/or MEKE%Au + type(smartredis_meke_cs_type) :: smartredis_meke_CS !< Control structure for the inferring EKE via + !! ML using using the SmartRedis interface + end type MEKE_CS contains !> Integrates forward-in-time the MEKE eddy energy equation. !! See \ref section_MEKE_equations. -subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, hv, Time) - type(MEKE_type), pointer :: MEKE !< MEKE data. +subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, hv, u, v, tv, Time) + type(MEKE_type), intent(inout) :: MEKE !< MEKE data. type(ocean_grid_type), intent(inout) :: G !< Ocean grid. type(verticalGrid_type), intent(in) :: GV !< Ocean vertical grid structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type @@ -132,9 +141,12 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h real, dimension(SZI_(G),SZJB_(G)), intent(in) :: SN_v !< Eady growth rate at v-points [T-1 ~> s-1]. type(vertvisc_type), intent(in) :: visc !< The vertical viscosity type. real, intent(in) :: dt !< Model(baroclinic) time-step [T ~> s]. - type(MEKE_CS), pointer :: CS !< MEKE control structure. - real, dimension(SZIB_(G),SZJ_(G),SZK_(G)), intent(in) :: hu !< Accumlated zonal mass flux [H L2 ~> m3 or kg]. - real, dimension(SZI_(G),SZJB_(G),SZK_(G)), intent(in) :: hv !< Accumlated meridional mass flux [H L2 ~> m3 or kg] + type(MEKE_CS), intent(inout) :: CS !< MEKE control structure. + real, dimension(SZIB_(G),SZJ_(G),SZK_(GV)), intent(in) :: hu !< Accumlated zonal mass flux [H L2 ~> m3 or kg]. + real, dimension(SZI_(G),SZJB_(G),SZK_(GV)), intent(in) :: hv !< Accumlated meridional mass flux [H L2 ~> m3 or kg] + real, dimension(SZIB_(G),SZJ_(G),SZK_(GV)), intent(inout) :: u !< Zonal velocity + real, dimension(SZI_(G),SZJB_(G),SZK_(GV)), intent(inout) :: v !< Meridional velocity + type(thermo_var_ptrs), intent(in) :: tv !< Type containing thermodynamic variables type(time_type), intent(in) :: Time !< The time used for interpolating EKE ! Local variables @@ -203,8 +215,8 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h return endif - if (.not. 
CS%MEKE_from_file) then - + select case(CS%eke_src) + case(EKE_PROG) if (CS%debug) then if (allocated(MEKE%mom_src)) & call hchksum(MEKE%mom_src, 'MEKE mom_src', G%HI, scale=US%RZ3_T3_to_W_m2*US%L_to_Z**2) @@ -260,7 +272,6 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h endif endif - ! Calculate drag_rate_visc(i,j) which accounts for the model bottom mean flow if (CS%visc_drag .and. allocated(visc%Kv_bbl_u) .and. allocated(visc%Kv_bbl_v)) then !$OMP parallel do default(shared) @@ -583,12 +594,19 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h call hchksum(MEKE%MEKE, "MEKE post-update MEKE", G%HI, haloshift=0, scale=US%L_T_to_m_s**2) endif - else ! read MEKE from file + case(EKE_FILE) call time_interp_external(CS%id_eke,Time,data_eke) do j=js,je ; do i=is,ie MEKE%MEKE(i,j) = data_eke(i,j) * G%mask2dT(i,j) enddo; enddo - endif + call MEKE_lengthScales(CS, MEKE, G, GV, US, SN_u, SN_v, MEKE%MEKE, depth_tot, bottomFac2, barotrFac2, LmixScale) + case(EKE_SMARTREDIS) + call pass_vector(u, v, G%Domain) + call MEKE_lengthScales(CS, MEKE, G, GV, US, SN_u, SN_v, MEKE%MEKE, depth_tot, bottomFac2, barotrFac2, LmixScale) + call infer_meke(G, GV, US, CS%smartredis_meke_CS, Time, MEKE%MEKE, MEKE%Rd_dx_h, u, v, tv, h, dt) + case default + call MOM_error(FATAL,"Invalid method specified for calculating EKE") + end select call cpu_clock_begin(CS%id_clock_pass) call do_group_pass(CS%pass_MEKE, G%Domain) @@ -635,7 +653,7 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h enddo ; enddo endif - if (associated(MEKE%Kh) .or. associated(MEKE%Ku) .or. associated(MEKE%Au)) then + if (allocated(MEKE%Kh) .or. allocated(MEKE%Ku) .or. allocated(MEKE%Au)) then call cpu_clock_begin(CS%id_clock_pass) call do_group_pass(CS%pass_Kh, G%Domain) call cpu_clock_end(CS%id_clock_pass) @@ -1037,15 +1055,18 @@ end subroutine MEKE_lengthScales_0d !> Initializes the MOM_MEKE module and reads parameters. !! Returns True if module is to be used, otherwise returns False. -logical function MEKE_init(Time, G, US, param_file, diag, CS, MEKE, restart_CS) +logical function MEKE_init(Time, G, US, param_file, diag, smartredis_CS, CS, MEKE, restart_CS, meke_in_dynamics) type(time_type), intent(in) :: Time !< The current model time. type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(param_file_type), intent(in) :: param_file !< Parameter file parser structure. + type(smartredis_CS_type),intent(in) :: smartredis_CS !< SmartRedis client type(diag_ctrl), target, intent(inout) :: diag !< Diagnostics structure. type(MEKE_CS), intent(inout) :: CS !< MEKE control structure. type(MEKE_type), intent(inout) :: MEKE !< MEKE fields type(MOM_restart_CS), intent(in) :: restart_CS !< MOM restart control struct + logical, intent( out) :: meke_in_dynamics !< If true, MEKE is stepped forward in dynamics + !! otherwise in tracer dynamics ! Local variables real :: I_T_rescale ! A rescaling factor for time from the internal representation in this @@ -1054,7 +1075,8 @@ logical function MEKE_init(Time, G, US, param_file, diag, CS, MEKE, restart_CS) ! run to the representation in a restart file. real :: MEKE_restoring_timescale ! The timescale used to nudge MEKE toward its equilibrium value. real :: cdrag ! The default bottom drag coefficient [nondim]. 
- character(len=200) :: eke_file + character(len=200) :: eke_filename, eke_varname, inputdir + character(len=16) :: eke_source_str integer :: i, j, is, ie, js, je, isd, ied, jsd, jed logical :: laplacian, biharmonic, coldStart ! This include declares and sets the variable "version". @@ -1073,29 +1095,39 @@ logical function MEKE_init(Time, G, US, param_file, diag, CS, MEKE, restart_CS) default=.false.) if (.not. MEKE_init) return CS%initialized = .true. + call get_param(param_file, mdl, "MEKE_IN_DYNAMICS", meke_in_dynamics, & + "If true, step MEKE forward with the dynamics"// & + "otherwise with the tracer timestep.", & + default=.true.) + + call get_param(param_file, mdl, "EKE_SOURCE", eke_source_str, & + "Determine the where EKE comes from:\n" // & + " 'prog': Calculated solving EKE equation\n"// & + " 'file': Read in from a file\n" // & + " 'smartredis': Retrieved from SMARTREDIS", default='prog') call MOM_mesg("MEKE_init: reading parameters ", 5) - call get_param(param_file, mdl, "MEKE_FROM_FILE", CS%MEKE_from_file, & - "If true, reads EKE from a netCDF file.", default=.false.) - if (CS%MEKE_from_file) then + select case (lowercase(eke_source_str)) + case("file") + CS%eke_src = EKE_FILE call time_interp_external_init - call get_param(param_file, mdl, "EKE_FILE", CS%eke_file, & - "A file in which to find the surface salinity to use for restoring.", & + call get_param(param_file, mdl, "EKE_FILE", eke_filename, & + "A file in which to find the eddy kineteic energy variable.", & default="eke_file.nc") - call get_param(param_file, mdl, "EKE_VARIABLE", CS%eke_var_name, & - "The name of the surface salinity variable to read from "//& - "SALT_RESTORE_FILE for restoring salinity.", & + call get_param(param_file, mdl, "EKE_VARIABLE", eke_varname, & + "The name of the eddy kinetic energy variable to read from "//& + "EKE_FILE to use in MEKE.", & default="eke") - call get_param(param_file, mdl, "INPUTDIR", CS%inputdir, & + call get_param(param_file, mdl, "INPUTDIR", inputdir, & "The directory in which all input files are found.", & default=".", do_not_log=.true.) - CS%inputdir = slasher(CS%inputdir) + inputdir = slasher(inputdir) - eke_file = trim(CS%inputdir) // trim(CS%eke_file) - CS%id_eke = init_external_field(eke_file, CS%eke_var_name, domain=G%Domain%mpp_domain) - - else + eke_filename = trim(inputdir) // trim(eke_filename) + CS%id_eke = init_external_field(eke_filename, eke_varname, domain=G%Domain%mpp_domain) + case("prog") + CS%eke_src = EKE_PROG ! Read all relevant parameters and write them to the model log. call get_param(param_file, mdl, "MEKE_DAMPING", CS%MEKE_damping, & "The local depth-independent MEKE dissipation rate.", & @@ -1136,7 +1168,7 @@ logical function MEKE_init(Time, G, US, param_file, diag, CS, MEKE, restart_CS) if (CS%MEKE_equilibrium_restoring) then call get_param(param_file, mdl, "MEKE_RESTORING_TIMESCALE", MEKE_restoring_timescale, & "The timescale used to nudge MEKE toward its equilibrium value.", units="s", & - default=1e6, scale=US%T_to_s) + default=1e6, scale=US%s_to_T) CS%MEKE_restoring_rate = 1.0 / MEKE_restoring_timescale endif @@ -1162,7 +1194,12 @@ logical function MEKE_init(Time, G, US, param_file, diag, CS, MEKE, restart_CS) call get_param(param_file, mdl, "MEKE_DTSCALE", CS%MEKE_dtScale, & "A scaling factor to accelerate the time evolution of MEKE.", & units="nondim", default=1.0) - endif ! 
MEKE_from_file + case("smartredis") + CS%eke_src = EKE_SMARTREDIS + call smartredis_meke_init(diag, G, US, Time, param_file, smartredis_CS, CS%smartredis_meke_CS) + case default + call MOM_error(FATAL, "Invalid method selected for calculating EKE") + end select ! GMM, make sure all params used to calculated MEKE are within the above if call get_param(param_file, mdl, "MEKE_KHCOEFF", CS%MEKE_KhCoeff, & From 2dbf66aaee93f044dcd53a4f930b24e7a4928f14 Mon Sep 17 00:00:00 2001 From: Andrew Shao Date: Wed, 27 Jul 2022 20:03:28 -0500 Subject: [PATCH 35/40] Refactor implementation to isolate SmartRedis The implementation of inferring EKE using a neural network via a machine-learning interface has been further refactored to: - Isolate the mention of specific solutions, instead referring to a name that is more descriptive of its functionality (i.e. dbclient instead of smartredis) - The calculation of the features is also now included in the main MOM6 codebase --- .../external/dbclient/MOM_smartredis.F90 | 38 ++ .../smartredis_client.F90 | 1 + .../smartredis/MOM_smartredis_MEKE.F90 | 61 --- src/core/MOM.F90 | 17 +- src/framework/MOM_smartredis.F90 | 78 ---- src/parameterizations/lateral/MOM_MEKE.F90 | 359 ++++++++++++++++-- 6 files changed, 376 insertions(+), 178 deletions(-) create mode 100644 config_src/external/dbclient/MOM_smartredis.F90 rename config_src/external/{smartredis => dbclient}/smartredis_client.F90 (99%) delete mode 100644 config_src/external/smartredis/MOM_smartredis_MEKE.F90 delete mode 100644 src/framework/MOM_smartredis.F90 diff --git a/config_src/external/dbclient/MOM_smartredis.F90 b/config_src/external/dbclient/MOM_smartredis.F90 new file mode 100644 index 0000000000..17175d3885 --- /dev/null +++ b/config_src/external/dbclient/MOM_smartredis.F90 @@ -0,0 +1,38 @@ +!> Contains routines necessary to initialize the Database client +module MOM_dbclient + +! This file is part of MOM6. See LICENSE.md for the license. +use MOM_file_parser, only : param_file_type +use MOM_error_handler, only : MOM_error, WARNING +use smartredis_client, only : dbclient_type => client_type + +implicit none; private + +!> Control structure to store Database client related parameters and objects +type, public :: dbclient_CS_type + type(dbclient_type) :: client !< The Database client itself + logical :: use_dbclient !< If True, use Database within MOM6 + logical :: colocated !< If True, the orchestrator was setup in 'co-located' mode + logical :: cluster !< If True, the orchestrator has three shards or more + integer :: colocated_stride !< Sets which ranks will load the model from the file + !! e.g. mod(rank,colocated_stride) == 0 +end type dbclient_CS_type + +public :: dbclient_type +public :: dbclient_init + +contains + +subroutine dbclient_init(param_file, CS, client_in) + type(param_file_type), intent(in ) :: param_file !< Parameter file structure + type(dbclient_CS_type), intent(inout) :: CS !< Control structure for Database + type(dbclient_type), optional, intent(in ) :: client_in !< If present, use a previously initialized + !! Database client + + call MOM_error(WARNING,"dbclient_init was compiled using the dummy module. 
If this was\n"//& + "a mistake, please follow the instructions in:\n"//& + "MOM6/config_src/external/dbclient/README.md") +end subroutine dbclient_init + +end module MOM_dbclient + diff --git a/config_src/external/smartredis/smartredis_client.F90 b/config_src/external/dbclient/smartredis_client.F90 similarity index 99% rename from config_src/external/smartredis/smartredis_client.F90 rename to config_src/external/dbclient/smartredis_client.F90 index 30bb991b00..9077d51577 100644 --- a/config_src/external/smartredis/smartredis_client.F90 +++ b/config_src/external/dbclient/smartredis_client.F90 @@ -1,5 +1,6 @@ module smartredis_client +! This file is part of MOM6. See LICENSE.md for the license. use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int use iso_c_binding, only : c_int8_t, c_int16_t, c_int32_t, c_int64_t, c_float, c_double, c_size_t use iso_c_binding, only : c_loc, c_f_pointer diff --git a/config_src/external/smartredis/MOM_smartredis_MEKE.F90 b/config_src/external/smartredis/MOM_smartredis_MEKE.F90 deleted file mode 100644 index 0928fc98c2..0000000000 --- a/config_src/external/smartredis/MOM_smartredis_MEKE.F90 +++ /dev/null @@ -1,61 +0,0 @@ -!> Contains routines that contain dummy routines for the smart -module MOM_smartredis_meke - -use MOM_diag_mediator, only : diag_ctrl, time_type -use MOM_error_handler, only : MOM_error, FATAL, WARNING, is_root_pe -use MOM_grid, only : ocean_grid_type -use MOM_file_parser, only : param_file_type -use MOM_smartredis, only : smartredis_CS_type -use MOM_unit_scaling, only : unit_scale_type -use MOM_variables, only : thermo_var_ptrs -use MOM_verticalGrid, only : verticalGrid_type - -implicit none; private - -#include - -public smartredis_meke_init, infer_meke - -type, public :: smartredis_meke_CS_type; private - -end type smartredis_meke_CS_type - -contains - -!> Initializer for the SmartRedis MEKE module that uses ML to predict eddy kinetic energy -subroutine smartredis_meke_init(diag, G, US, Time, param_file, smartredis_CS, CS) - type(diag_ctrl), target, intent(inout) :: diag !< Diagnostics structure. - type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure. - type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(time_type), intent(in) :: Time !< The current model time. - type(param_file_type), intent(in) :: param_file !< Parameter file parser structure. - type(smartredis_CS_type), target, intent(in) :: smartredis_CS !< SmartRedis client - type(smartredis_meke_CS_type), intent(inout) :: CS !< Control structure for this module - - call MOM_error(FATAL,"smartredis_meke_init was compiled using the dummy module. Recompile"//& - "with source code from https://github.com/CrayLabs/MOM6-smartredis") -end subroutine smartredis_meke_init - -!> Use the SmartRedis client to call a machine learning to predict eddy kinetic energy -subroutine infer_meke(G, GV, US, CS, Time, MEKE, Rd_dx_h, u, v, tv, h, dt) - type(ocean_grid_type), intent(inout) :: G !< Ocean grid - type(verticalGrid_type), intent(in) :: GV !< Ocean vertical grid structure - type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type - type(time_type), intent(in) :: Time !< The current model time. - type(smartredis_meke_CS_type), intent(in) :: CS !< Control structure for inferring MEKE - !! using SmartRedis - real, dimension(SZI_(G),SZJ_(G)), intent( out) :: MEKE !< Vertically averaged eddy kinetic energy [L2 T-2 ~> m2 s-2] - real, dimension(SZI_(G),SZJ_(G)), intent(in ) :: Rd_dx_h !< Rossby radius of deformation over - !! 
the grid length scale [nondim] - real, dimension(SZIB_(G),SZJ_(G),SZK_(G)), intent(in) :: u !< Zonal velocity [L T-1 ~> m s-1] - real, dimension(SZI_(G),SZJB_(G),SZK_(G)), intent(in) :: v !< Meridional velocity [L T-1 ~> m s-1] - type(thermo_var_ptrs), intent(in) :: tv !< Type containing thermodynamic variables - real, dimension(SZI_(G),SZJ_(G),SZK_(GV)), intent(in) :: h !< Layer thickness [H ~> m or kg m-2]. - real, intent(in) :: dt !< Model(baroclinic) time-step [T ~> s]. - - call MOM_error(FATAL,"infer_meke was compiled using the dummy module. Recompile"//& - "with source code from https://github.com/CrayLabs/MOM6-smartredis") - -end subroutine infer_meke - -end module MOM_smartredis_meke diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index 4cb6696fc5..ebb6f9f48b 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -143,8 +143,8 @@ module MOM use MOM_porous_barriers, only : porous_widths -! SmartRedis machine-learning interface -use MOM_smartredis, only : smartredis_CS_type, smartredis_init, client_type +! Database client used for machine-learning interface +use MOM_dbclient, only : dbclient_CS_type, dbclient_init, dbclient_type ! ODA modules use MOM_oda_driver_mod, only : ODA_CS, oda, init_oda, oda_end @@ -343,6 +343,7 @@ module MOM !! higher values use more appropriate expressions that differ at !! roundoff for non-Boussinesq cases. logical :: use_particles !< Turns on the particles package + logical :: use_dbclient !< Turns on the database client used for ML inference/analysis character(len=10) :: particle_type !< Particle types include: surface(default), profiling and sail drone. type(MOM_diag_IDs) :: IDs !< Handles used for diagnostics. @@ -409,7 +410,7 @@ module MOM type(ODA_CS), pointer :: odaCS => NULL() !< a pointer to the control structure for handling !! ensemble model state vectors and data assimilation !! increments and priors - type(smartredis_CS_type) :: smartredis_CS !< SmartRedis control structure for online ML/AI + type(dbclient_CS_type) :: dbclient_CS !< Control structure for database client used for online ML/AI type(porous_barrier_ptrs) :: pbv !< porous barrier fractional cell metrics real ALLOCABLE_, dimension(NIMEMB_PTR_,NJMEM_,NKMEM_) :: por_face_areaU !< fractional open area of U-faces [nondim] real ALLOCABLE_, dimension(NIMEM_,NJMEMB_PTR_,NKMEM_) :: por_face_areaV !< fractional open area of V-faces [nondim] @@ -1812,7 +1813,7 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & ! This include declares and sets the variable "version". # include "version_variable.h" - integer :: i, j, k, is, ie, js, je, isd, ied, jsd, jed, nz + integer :: i, j, is, ie, js, je, isd, ied, jsd, jed, nz integer :: IsdB, IedB, JsdB, JedB real :: dtbt ! If negative, this specifies the barotropic timestep as a fraction ! of the maximum stable value [nondim]. @@ -2203,6 +2204,10 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & "vertical grid files. Other values are invalid.", default=1) if (write_geom<0 .or. write_geom>2) call MOM_error(FATAL,"MOM: "//& "WRITE_GEOM must be equal to 0, 1 or 2.") + call get_param(param_file, "MOM", "USE_DBCLIENT", CS%use_dbclient, & + "If true, initialize a client to a remote database that can "//& + "be used for online analysis and machine-learning inference.",& + default=.false.) ! Check for inconsistent parameter settings. if (CS%use_ALE_algorithm .and. 
bulkmixedlayer) call MOM_error(FATAL, & @@ -2807,8 +2812,8 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & endif call cpu_clock_end(id_clock_MOM_init) - call smartredis_init(param_file, CS%smartredis_CS) - CS%useMEKE = MEKE_init(Time, G, US, param_file, diag, CS%smartredis_CS, CS%MEKE_CSp, CS%MEKE, & + if (CS%use_dbclient) call dbclient_init(param_file, CS%dbclient_CS) + CS%useMEKE = MEKE_init(Time, G, US, param_file, diag, CS%dbclient_CS, CS%MEKE_CSp, CS%MEKE, & restart_CSp, CS%MEKE_in_dynamics) call VarMix_init(Time, G, GV, US, param_file, diag, CS%VarMix) diff --git a/src/framework/MOM_smartredis.F90 b/src/framework/MOM_smartredis.F90 deleted file mode 100644 index f9db54dd3c..0000000000 --- a/src/framework/MOM_smartredis.F90 +++ /dev/null @@ -1,78 +0,0 @@ -!> Contains routines necessary to initialize the SmartRedis client -module MOM_smartredis - -use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end, CLOCK_ROUTINE -use MOM_error_handler, only : MOM_error, FATAL, WARNING, NOTE, MOM_mesg, is_root_pe -use MOM_file_parser, only : read_param, get_param, log_version, param_file_type -use smartredis_client, only : client_type - -implicit none; private - -!> Control structure to store SmartRedis client related parameters and objects -type, public :: smartredis_CS_type - type(client_type) :: client !< The SmartRedis client itself - logical :: use_smartredis !< If True, use SmartRedis within MOM6 - logical :: colocated !< If True, the orchestrator was setup in 'co-located' mode - logical :: cluster !< If True, the orchestrator has three shards or more - integer :: colocated_stride !< Sets which ranks will load the model from the file - !! e.g. mod(rank,colocated_stride) == 0 -end type - -public :: client_type -public :: smartredis_init - -contains - -subroutine smartredis_init(param_file, CS, client_in) - type(param_file_type), intent(in ) :: param_file !< Parameter file structure - type(smartredis_CS_type), intent(inout) :: CS !< Control structure for SmartRedis - type(client_type), optional, intent(in ) :: client_in !< If present, use a previously initialized - !! SmartRedis client - - character(len=40) :: mdl = "MOM_SMARTREDIS" - integer :: id_client_init - integer :: return_code - call get_param(param_file, mdl, "USE_SMARTREDIS", CS%use_smartredis, & - "If true, use the data client to connect"//& - "with the SmartRedis database", default=.false.) - - if (present(client_in)) then ! The driver (e.g. the NUOPC cap) has already initialized the client - - CS%client = client_in - - if (.not. CS%client%isinitialized() .and. CS%use_smartredis) then - call MOM_error(FATAL, & - "If using a SmartRedis client not initialized within MOM, client%initialize must have already been invoked."//& - " Check that the client has been initialized in the driver before the call to initialize_MOM") - endif - - elseif (CS%use_smartredis) then ! The client will be initialized within MOM - - call get_param(param_file, mdl, "SMARTREDIS_COLOCATED", CS%colocated, & - "If true, the SmartRedis database is colocated on the simulation nodes.",& - default=.false.) - if (CS%colocated) then - CS%cluster = .false. - call get_param(param_file, mdl, "SMARTREDIS_COLOCATED_STRIDE", CS%colocated_stride, & - "If true, the SmartRedis database is colocated on the simulation nodes.",& - default=0) - else - call get_param(param_file, mdl, "SMARTREDIS_CLUSTER", CS%cluster, & - "If true, the SmartRedis database is distributed over multiple nodes.",& - default=.true.) 
- endif - id_client_init = cpu_clock_id('(SMARTREDIS client init)', grain=CLOCK_ROUTINE) - call MOM_error(NOTE,"SmartRedis Client Initializing") - call cpu_clock_begin(id_client_init) - return_code = CS%client%initialize(CS%cluster) - if (CS%client%SR_error_parser(return_code)) then - call MOM_error(FATAL, "SmartRedis client failed to initialize") - endif - call MOM_error(NOTE,"SmartRedis Client Initialized") - call cpu_clock_end(id_client_init) - - endif -end subroutine smartredis_init - -end module MOM_smartredis - diff --git a/src/parameterizations/lateral/MOM_MEKE.F90 b/src/parameterizations/lateral/MOM_MEKE.F90 index f5f542a40a..01bc878da2 100644 --- a/src/parameterizations/lateral/MOM_MEKE.F90 +++ b/src/parameterizations/lateral/MOM_MEKE.F90 @@ -4,29 +4,33 @@ module MOM_MEKE ! This file is part of MOM6. See LICENSE.md for the license. +use iso_c_binding, only : c_float + +use MOM_coms, only : PE_here +use MOM_dbclient, only : dbclient_type, dbclient_CS_type +use MOM_debugging, only : hchksum, uvchksum +use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end, CLOCK_ROUTINE +use MOM_diag_mediator, only : post_data, register_diag_field, safe_alloc_ptr +use MOM_diag_mediator, only : diag_ctrl, time_type +use MOM_domains, only : create_group_pass, do_group_pass, group_pass_type +use MOM_domains, only : pass_vector, pass_var +use MOM_error_handler, only : MOM_error, FATAL, WARNING, NOTE, MOM_mesg, is_root_pe +use MOM_file_parser, only : read_param, get_param, log_version, param_file_type +use MOM_grid, only : ocean_grid_type +use MOM_hor_index, only : hor_index_type +use MOM_interface_heights, only : find_eta +use MOM_interpolate, only : init_external_field, time_interp_external +use MOM_interpolate, only : time_interp_external_init +use MOM_io, only : vardesc, var_desc, slasher +use MOM_isopycnal_slopes, only : calc_isoneutral_slopes +use MOM_restart, only : MOM_restart_CS, register_restart_field, query_initialized +use MOM_string_functions, only : lowercase +use MOM_time_manager, only : time_type_to_real +use MOM_unit_scaling, only : unit_scale_type +use MOM_variables, only : vertvisc_type, thermo_var_ptrs +use MOM_verticalGrid, only : verticalGrid_type +use MOM_MEKE_types, only : MEKE_type -use MOM_debugging, only : hchksum, uvchksum -use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end, CLOCK_ROUTINE -use MOM_diag_mediator, only : post_data, register_diag_field, safe_alloc_ptr -use MOM_diag_mediator, only : diag_ctrl, time_type -use MOM_domains, only : create_group_pass, do_group_pass, group_pass_type -use MOM_domains, only : pass_vector -use MOM_error_handler, only : MOM_error, FATAL, WARNING, NOTE, MOM_mesg -use MOM_file_parser, only : read_param, get_param, log_version, param_file_type -use MOM_grid, only : ocean_grid_type -use MOM_hor_index, only : hor_index_type -use MOM_io, only : vardesc, var_desc, slasher -use MOM_smartredis_MEKE, only : smartredis_meke_CS_type, smartredis_meke_init, infer_meke -use MOM_restart, only : MOM_restart_CS, register_restart_field, query_initialized -use MOM_smartredis, only : client_type, smartredis_CS_type -use MOM_string_functions, only : lowercase -use MOM_unit_scaling, only : unit_scale_type -use MOM_variables, only : vertvisc_type, thermo_var_ptrs -use MOM_verticalGrid, only : verticalGrid_type -use MOM_MEKE_types, only : MEKE_type - -use time_interp_external_mod, only : init_external_field, time_interp_external -use time_interp_external_mod, only : time_interp_external_init implicit none ; private @@ -38,9 
+42,16 @@ module MOM_MEKE enum, bind(c) enumerator :: EKE_PROG !< Use prognostic equation to calcualte EKE enumerator :: EKE_FILE !< Read in EKE from a file - enumerator :: EKE_SMARTREDIS !< Infer EKE using a neural network + enumerator :: EKE_DBCLIENT !< Infer EKE using a neural network end enum +! Constants for this module +integer, parameter :: NUM_FEATURES = 4 !< How many features used to predict EKE +integer, parameter :: MKE_IDX = 1 !< Index of mean kinetic energy in the feature array +integer, parameter :: SLOPE_Z_IDX = 2 !< Index of vertically averaged isopycnal slope in the feature array +integer, parameter :: RV_IDX = 3 !< Index of surface relative vorticity in the feature array +integer, parameter :: RD_DX_Z_IDX = 4 !< Index of the radius of deformation over the grid size in the feature array + !> Control structure that contains MEKE parameters and diagnostics handles type, public :: MEKE_CS ; private logical :: initialized = .false. !< True if this control structure has been initialized. @@ -122,8 +133,27 @@ module MOM_MEKE integer :: id_clock_pass !< Clock for group pass calls type(group_pass_type) :: pass_MEKE !< Group halo pass handle for MEKE%MEKE and maybe MEKE%Kh_diff type(group_pass_type) :: pass_Kh !< Group halo pass handle for MEKE%Kh, MEKE%Ku, and/or MEKE%Au - type(smartredis_meke_cs_type) :: smartredis_meke_CS !< Control structure for the inferring EKE via - !! ML using using the SmartRedis interface + + ! MEKE via Machine Learning + type(dbclient_type), pointer :: client => NULL() !< Pointer to the database client + + logical :: online_analysis !< If true, post the EKE used in MOM6 at every timestep + character(len=5) :: model_key = 'mleke' !< Key where the ML-model is stored + character(len=7) :: key_suffix !< Suffix appended to every key sent to Redis + real :: eke_max !< The maximum value of EKE considered physically reasonable + + ! Clock ids + integer :: id_client_init !< Clock id to time initialization of the client + integer :: id_put_tensor !< Clock id to time put_tensor routine + integer :: id_run_model !< Clock id to time running of the ML model + integer :: id_unpack_tensor !< Clock id to time retrieval of EKE prediction + + ! Diagnostic ids + integer :: id_mke = -1 !< Diagnostic id for surface mean kinetic energy + integer :: id_slope_z = -1 !< Diagnostic id for vertically averaged horizontal slope magnitude + integer :: id_slope_x = -1 !< Diagnostic id for isopycnal slope in the x-direction + integer :: id_slope_y = -1 !< Diagnostic id for isopycnal slope in the y-direction + integer :: id_rv = -1 !< Diagnostic id for surface relative vorticity end type MEKE_CS @@ -196,6 +226,7 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h real :: sdt_damp ! dt for damping [T ~> s] (sdt could be split). logical :: use_drag_rate ! 
Flag to indicate drag_rate is finite integer :: i, j, k, is, ie, js, je, Isq, Ieq, Jsq, Jeq, nz + real(kind=c_float), dimension(size(MEKE%MEKE),NUM_FEATURES) :: features_array is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec ; nz = GV%ke Isq = G%IscB ; Ieq = G%IecB ; Jsq = G%JscB ; Jeq = G%JecB @@ -600,10 +631,11 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h MEKE%MEKE(i,j) = data_eke(i,j) * G%mask2dT(i,j) enddo; enddo call MEKE_lengthScales(CS, MEKE, G, GV, US, SN_u, SN_v, MEKE%MEKE, depth_tot, bottomFac2, barotrFac2, LmixScale) - case(EKE_SMARTREDIS) + case(EKE_DBCLIENT) call pass_vector(u, v, G%Domain) call MEKE_lengthScales(CS, MEKE, G, GV, US, SN_u, SN_v, MEKE%MEKE, depth_tot, bottomFac2, barotrFac2, LmixScale) - call infer_meke(G, GV, US, CS%smartredis_meke_CS, Time, MEKE%MEKE, MEKE%Rd_dx_h, u, v, tv, h, dt) + call ML_MEKE_calculate_features(G, GV, US, CS, MEKE%Rd_dx_h, u, v, tv, h, dt, features_array) + call predict_meke(G, CS, SIZE(h), Time, features_array, MEKE%MEKE) case default call MOM_error(FATAL,"Invalid method specified for calculating EKE") end select @@ -1055,12 +1087,12 @@ end subroutine MEKE_lengthScales_0d !> Initializes the MOM_MEKE module and reads parameters. !! Returns True if module is to be used, otherwise returns False. -logical function MEKE_init(Time, G, US, param_file, diag, smartredis_CS, CS, MEKE, restart_CS, meke_in_dynamics) +logical function MEKE_init(Time, G, US, param_file, diag, dbclient_CS, CS, MEKE, restart_CS, meke_in_dynamics) type(time_type), intent(in) :: Time !< The current model time. type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(param_file_type), intent(in) :: param_file !< Parameter file parser structure. - type(smartredis_CS_type),intent(in) :: smartredis_CS !< SmartRedis client + type(dbclient_CS_type), intent(in) :: dbclient_CS !< client type(diag_ctrl), target, intent(inout) :: diag !< Diagnostics structure. type(MEKE_CS), intent(inout) :: CS !< MEKE control structure. type(MEKE_type), intent(inout) :: MEKE !< MEKE fields @@ -1104,7 +1136,7 @@ logical function MEKE_init(Time, G, US, param_file, diag, smartredis_CS, CS, MEK "Determine the where EKE comes from:\n" // & " 'prog': Calculated solving EKE equation\n"// & " 'file': Read in from a file\n" // & - " 'smartredis': Retrieved from SMARTREDIS", default='prog') + " 'dbclient': Retrieved from ML-database", default='prog') call MOM_mesg("MEKE_init: reading parameters ", 5) @@ -1194,9 +1226,9 @@ logical function MEKE_init(Time, G, US, param_file, diag, smartredis_CS, CS, MEK call get_param(param_file, mdl, "MEKE_DTSCALE", CS%MEKE_dtScale, & "A scaling factor to accelerate the time evolution of MEKE.", & units="nondim", default=1.0) - case("smartredis") - CS%eke_src = EKE_SMARTREDIS - call smartredis_meke_init(diag, G, US, Time, param_file, smartredis_CS, CS%smartredis_meke_CS) + case("dbclient") + CS%eke_src = EKE_DBCLIENT + call ML_MEKE_init(diag, G, US, Time, param_file, dbclient_CS, CS) case default call MOM_error(FATAL, "Invalid method selected for calculating EKE") end select @@ -1458,6 +1490,267 @@ logical function MEKE_init(Time, G, US, param_file, diag, smartredis_CS, CS, MEK end function MEKE_init +!> Initializer for the variant of MEKE that uses ML to predict eddy kinetic energy +subroutine ML_MEKE_init(diag, G, US, Time, param_file, dbclient_CS, CS) + type(diag_ctrl), target, intent(inout) :: diag !< Diagnostics structure. 
+ type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure. + type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type + type(time_type), intent(in) :: Time !< The current model time. + type(param_file_type), intent(in) :: param_file !< Parameter file parser structure. + type(dbclient_CS_type), intent(in) :: dbclient_CS !< Control structure for the database client + type(MEKE_CS), intent(inout) :: CS !< Control structure for this module + + character(len=200) :: inputdir, backend, model_filename + integer :: db_return_code, batch_size + character(len=40) :: mdl = "MOM_ML_MEKE" + + ! Store pointers in control structure + write(CS%key_suffix, '(A,I6.6)') '_', PE_here() + ! Put some basic information into the database + db_return_code = 0 + db_return_code = CS%client%put_tensor("meta"//CS%key_suffix, & + REAL([G%isd_global, G%idg_offset, G%jsd_global, G%jdg_offset]),[4]) + db_return_code + db_return_code = CS%client%put_tensor("geolat"//CS%key_suffix, G%geoLatT, shape(G%geoLatT)) + db_return_code + db_return_code = CS%client%put_tensor("geolon"//CS%key_suffix, G%geoLonT, shape(G%geoLonT)) + db_return_code + db_return_code = CS%client%put_tensor("EKE_shape"//CS%key_suffix, shape(G%geolonT), [2]) + db_return_code + + if (CS%client%SR_error_parser(db_return_code)) call MOM_error(FATAL, "Putting metadata into the database failed") + + call read_param(param_file, "INPUTDIR", inputdir) + inputdir = slasher(inputdir) + + call get_param(param_file, mdl, "BATCH_SIZE", batch_size, "Batch size to use for inference", default=1) + call get_param(param_file, mdl, "EKE_BACKEND", backend, & + "The computational backend to use for EKE inference (CPU or GPU)", default="GPU") + call get_param(param_file, mdl, "EKE_MODEL", model_filename, & + "Filename of a saved pyTorch model to use", fail_if_missing = .true.) + call get_param(param_file, mdl, "EKE_MAX", CS%eke_max, & + "Maximum value of EKE allowed when inferring EKE", default=2., scale=US%L_T_to_m_s**2) + + ! Set the machine learning model + if (dbclient_CS%colocated) then + if (modulo(PE_here(),dbclient_CS%colocated_stride) == 0) then + db_return_code = CS%client%set_model_from_file(CS%model_key, trim(inputdir)//trim(model_filename), & + "TORCH", backend, batch_size=batch_size) + endif + else + if (is_root_pe()) then + db_return_code = CS%client%set_model_from_file(CS%model_key, trim(inputdir)//trim(model_filename), & + "TORCH", backend, batch_size=batch_size) + endif + endif + if (CS%client%SR_error_parser(db_return_code)) then + call MOM_error(FATAL, "MEKE: set_model failed") + endif + + call get_param(param_file, mdl, "ONLINE_ANALYSIS", CS%online_analysis, & + "If true, post EKE used in MOM6 to the database for analysis", default=.true.) + + ! Set various clock ids + CS%id_client_init = cpu_clock_id('(ML_MEKE client init)', grain=CLOCK_ROUTINE) + CS%id_put_tensor = cpu_clock_id('(ML_MEKE put tensor)', grain=CLOCK_ROUTINE) + CS%id_run_model = cpu_clock_id('(ML_MEKE run model)', grain=CLOCK_ROUTINE) + CS%id_unpack_tensor = cpu_clock_id('(ML_MEKE unpack tensor)', grain=CLOCK_ROUTINE) + + !
Diagnostics for ML_MEKE + CS%id_mke = register_diag_field('ocean_model', 'MEKE_MKE', diag%axesT1, Time, & + 'Surface mean (resolved) kinetic energy used in MEKE', 'm2 s-2', conversion=US%L_T_to_m_s**2) + CS%id_slope_z= register_diag_field('ocean_model', 'MEKE_slope_z', diag%axesT1, Time, & + 'Vertically averaged isopycnal slope magnitude used in MEKE', 'm2 s-2', conversion=US%L_T_to_m_s**2) + CS%id_slope_x= register_diag_field('ocean_model', 'MEKE_slope_x', diag%axesCui, Time, & + 'Isopycnal slope in the x-direction used in MEKE', 'm2 s-2', conversion=US%L_T_to_m_s**2) + CS%id_slope_y= register_diag_field('ocean_model', 'MEKE_slope_y', diag%axesCvi, Time, & + 'Isopycnal slope in the y-direction used in MEKE', 'm2 s-2', conversion=US%L_T_to_m_s**2) + CS%id_rv= register_diag_field('ocean_model', 'MEKE_RV', diag%axesT1, Time, & + 'Surface relative vorticity used in MEKE', 'm2 s-2', conversion=US%L_T_to_m_s**2) + +end subroutine ML_MEKE_init + +!> Calculate the various features used for the machine learning prediction +subroutine ML_MEKE_calculate_features(G, GV, US, CS, Rd_dx_h, u, v, tv, h, dt, features_array) + type(ocean_grid_type), intent(inout) :: G !< Ocean grid + type(verticalGrid_type), intent(in) :: GV !< Ocean vertical grid structure + type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type + type(MEKE_CS), intent(in) :: CS !< Control structure for MEKE + real, dimension(SZI_(G),SZJ_(G)), intent(in ) :: Rd_dx_h !< Rossby radius of deformation over + !! the grid length scale [nondim] + real, dimension(SZIB_(G),SZJ_(G),SZK_(G)), intent(in) :: u !< Zonal velocity [L T-1 ~> m s-1] + real, dimension(SZI_(G),SZJB_(G),SZK_(G)), intent(in) :: v !< Meridional velocity [L T-1 ~> m s-1] + type(thermo_var_ptrs), intent(in) :: tv !< Type containing thermodynamic variables + real, dimension(SZI_(G),SZJ_(G),SZK_(GV)), intent(in) :: h !< Layer thickness [H ~> m or kg m-2]. + real, intent(in) :: dt !< Model(baroclinic) time-step [T ~> s]. + real(kind=c_float), dimension(SIZE(h),num_features), intent( out) :: features_array + !< The array of features needed for machine + !! learning inference + + real, dimension(SZI_(G),SZJ_(G)) :: mke + real, dimension(SZI_(G),SZJ_(G)) :: slope_z + real, dimension(SZIB_(G),SZJB_(G)) :: rv_z + real, dimension(SZIB_(G),SZJB_(G)) :: rv_z_t + real, dimension(SZI_(G),SZJ_(G)) :: rd_dx_z + + real, dimension(SZIB_(G),SZJ_(G), SZK_(G)) :: h_u ! Thickness at u point + real, dimension(SZI_(G),SZJB_(G), SZK_(G)) :: h_v ! Thickness at v point + real, dimension(SZIB_(G),SZJ_(G),SZK_(G)+1) :: slope_x ! Isoneutral slope at U point + real, dimension(SZI_(G),SZJB_(G),SZK_(G)+1) :: slope_y ! Isoneutral slope at V point + real, dimension(SZIB_(G),SZJ_(G)) :: slope_x_vert_avg ! Isoneutral slope at U point + real, dimension(SZI_(G),SZJB_(G)) :: slope_y_vert_avg ! Isoneutral slope at V point + real, dimension(SZI_(G), SZJ_(G), SZK_(G)+1) :: e ! The interface heights relative to mean sea level [Z ~> m]. + real :: slope_t, u_t, v_t ! u and v interpolated to thickness point + real :: dvdx, dudy + real :: a_e, a_w, a_n, a_s, Idenom, sum_area + + integer :: i, j, k, is, ie, js, je, Isq, Ieq, Jsq, Jeq, nz + + is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec ; nz = GV%ke + Isq = G%IscB ; Ieq = G%IecB ; Jsq = G%JscB ; Jeq = G%JecB + + ! Calculate various features used to infer eddy kinetic energy + !
Linear interpolation to estimate thickness at a velocity points + do k=1,nz; do j=js-1,je+1; do i=is-1,ie+1 + h_u(I,j,k) = 0.5*(h(i,j,k)*G%mask2dT(i,j) + h(i+1,j,k)*G%mask2dT(i+1,j)) + GV%Angstrom_H + h_v(i,J,k) = 0.5*(h(i,j,k)*G%mask2dT(i,j) + h(i,j+1,k)*G%mask2dT(i,j+1)) + GV%Angstrom_H + enddo; enddo; enddo; + call find_eta(h, tv, G, GV, US, e, halo_size=2) + call calc_isoneutral_slopes(G, GV, US, h, e, tv, dt*1.e-7, .false., slope_x, slope_y) + call pass_vector(slope_x, slope_y, G%Domain) + do j=js-1,je+1; do i=is-1,ie+1 + slope_x_vert_avg(I,j) = vertical_average_interface(slope_x(i,j,:), h_u(i,j,:), GV%H_subroundoff) + slope_y_vert_avg(i,J) = vertical_average_interface(slope_y(i,j,:), h_v(i,j,:), GV%H_subroundoff) + enddo; enddo + slope_z(:,:) = 0. + + call pass_vector(slope_x_vert_avg, slope_y_vert_avg, G%Domain) + do j=js,je; do i=is,ie + ! Calculate weights for interpolation from velocity points to h points + sum_area = G%areaCu(I-1,j) + G%areaCu(I,j) + if (sum_area>0.0) then + Idenom = sqrt(0.5*G%IareaT(i,j) / sum_area) + a_w = G%areaCu(I-1,j) * Idenom + a_e = G%areaCu(I,j) * Idenom + else + a_w = 0.0 ; a_e = 0.0 + endif + + sum_area = G%areaCv(i,J-1) + G%areaCv(i,J) + if (sum_area>0.0) then + Idenom = sqrt(0.5*G%IareaT(i,j) / sum_area) + a_s = G%areaCv(i,J-1) * Idenom + a_n = G%areaCv(i,J) * Idenom + else + a_s = 0.0 ; a_n = 0.0 + endif + + ! Calculate mean kinetic energy + u_t = a_e*u(I,j,1)+a_w*u(I-1,j,1) + v_t = a_n*v(i,J,1)+a_s*v(i,J-1,1) + mke(i,j) = 0.5*( u_t*u_t + v_t*v_t ) + + ! Calculate the magnitude of the slope + slope_t = slope_x_vert_avg(I,j)*a_e+slope_x_vert_avg(I-1,j)*a_w + slope_z(i,j) = sqrt(slope_t*slope_t) + slope_t = slope_y_vert_avg(i,J)*a_n+slope_y_vert_avg(i,J-1)*a_s + slope_z(i,j) = 0.5*(slope_z(i,j) + sqrt(slope_t*slope_t))*G%mask2dT(i,j) + enddo; enddo + call pass_var(slope_z, G%Domain) + + ! Calculate relative vorticity + do J=Jsq-1,Jeq+1 ; do I=Isq-1,Ieq+1 + dvdx = (v(i+1,J,1)*G%dyCv(i+1,J) - v(i,J,1)*G%dyCv(i,J)) + dudy = (u(I,j+1,1)*G%dxCu(I,j+1) - u(I,j,1)*G%dxCu(I,j)) + ! Assumed no slip + rv_z(I,J) = (2.0-G%mask2dBu(I,J)) * (dvdx - dudy) * G%IareaBu(I,J) + enddo; enddo + ! Interpolate RV to t-point, revisit this calculation to include metrics + do j=js,je; do i=is,ie + rv_z_t(i,j) = 0.25*(rv_z(i-1,j) + rv_z(i,j) + rv_z(i-1,j-1) + rv_z(i,j-1)) + enddo; enddo + + + ! Construct the feature array + features_array(:,mke_idx) = pack(mke,.true.) + features_array(:,slope_z_idx) = pack(slope_z,.true.) + features_array(:,rd_dx_z_idx) = pack(Rd_dx_h,.true.) + features_array(:,rv_idx) = pack(rv_z_t,.true.) + + if (CS%id_rv>0) call post_data(CS%id_rv, rv_z, CS%diag) + if (CS%id_mke>0) call post_data(CS%id_mke, mke, CS%diag) + if (CS%id_slope_z>0) call post_data(CS%id_slope_z, slope_z, CS%diag) + if (CS%id_slope_x>0) call post_data(CS%id_slope_x, slope_x, CS%diag) + if (CS%id_slope_y>0) call post_data(CS%id_slope_y, slope_y, CS%diag) +end subroutine ML_MEKE_calculate_features + +!> Use the machine learning interface to predict EKE +subroutine predict_MEKE(G, CS, npts, Time, features_array, MEKE) + type(ocean_grid_type), intent(inout) :: G !< Ocean grid + type(MEKE_CS), intent(in ) :: CS !< Control structure for MEKE + integer, intent(in ) :: npts !< Number of T-grid cells on the local + !! domain + type(time_type), intent(in ) :: Time !< The current model time + real(kind=c_float), dimension(npts,num_features), intent(in ) :: features_array + !< The array of features needed for machine + !! 
learning inference + real, dimension(SZI_(G),SZJ_(G)), intent( out) :: MEKE !< Eddy kinetic energy [L2 T-2 ~> m2 s-2] + integer :: db_return_code + character(len=255), dimension(1) :: model_out, model_in + character(len=255) :: time_suffix + real(kind=c_float), dimension(SIZE(MEKE)) :: MEKE_vec + + integer :: i, j, is, ie, js, je + is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec +!> Use the database client to call a machine learning model to predict eddy kinetic energy + call cpu_clock_begin(CS%id_put_tensor) + db_return_code = CS%client%put_tensor("features"//CS%key_suffix, features_array, shape(features_array)) + call cpu_clock_end(CS%id_put_tensor) + + ! Run the ML model to predict EKE and return the result + model_out(1) = "EKE"//CS%key_suffix + model_in(1) = "features"//CS%key_suffix + call cpu_clock_begin(CS%id_run_model) + db_return_code = CS%client%run_model(CS%model_key, model_in, model_out) + call cpu_clock_end(CS%id_run_model) + if (CS%client%SR_error_parser(db_return_code)) then + call MOM_error(FATAL, "MEKE: run_model failed") + endif + call cpu_clock_begin(CS%id_unpack_tensor) + db_return_code = CS%client%unpack_tensor( model_out(1), MEKE_vec, shape(MEKE_vec) ) + call cpu_clock_end(CS%id_unpack_tensor) + + MEKE = reshape(MEKE_vec, shape(MEKE)) + do j=js,je; do i=is,ie + MEKE(i,j) = MIN(MAX(exp(MEKE(i,j)),0.),CS%eke_max) + enddo; enddo + call pass_var(MEKE,G%Domain) + + if (CS%online_analysis) then + write(time_suffix,"(F16.0)") time_type_to_real(Time) + db_return_code = CS%client%put_tensor(trim("EKE_")//trim(adjustl(time_suffix))//CS%key_suffix, MEKE, shape(MEKE)) + endif +end subroutine predict_MEKE + +!> Compute average of interface quantities weighted by the thickness of the surrounding layers +real function vertical_average_interface(h, w, h_min) + + real, dimension(:), intent(in) :: h !< Layer Thicknesses + real, dimension(:), intent(in) :: w !< Quantity to average + real, intent(in) :: h_min !< The vanishingly small layer thickness + + real :: htot, inv_htot + integer :: k, nk + + nk = size(h) + htot = h_min + do k=2,nk + htot = htot + (h(k-1)+h(k)) + enddo + inv_htot = 1./htot + + vertical_average_interface = 0. + do K=2,nk + vertical_average_interface = vertical_average_interface + (w(k)*(h(k-1)+h(k)))*inv_htot + enddo +end function vertical_average_interface + !> Allocates memory and register restart fields for the MOM_MEKE module. subroutine MEKE_alloc_register_restart(HI, US, param_file, MEKE, restart_CS) ! Arguments From 94e00e2c8e0ab63e417590402eec239171886a44 Mon Sep 17 00:00:00 2001 From: Andrew Shao Date: Fri, 29 Jul 2022 14:17:41 -0500 Subject: [PATCH 36/40] Further genericize the database client The changes here remove all references to specific implementations of clients used to communicate with the database. Additionally, references within MOM6 now refer to "MOM_database_comms" as the module name (with similarly named methods) rather than the dbclient_type.
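For illustration only (this sketch is not part of the diffs below), the call pattern expected of MOM6 components is roughly the following. The subroutine name, the tensor key, and the features array are placeholders invented for this example; the module entities and the client methods shown (put_tensor, SR_error_parser) are the ones defined in config_src/external/database_comms and exercised by MOM_MEKE.F90:

  subroutine example_db_usage(param_file)
    use iso_c_binding,      only : c_float
    use MOM_database_comms, only : dbcomms_CS_type, database_comms_init
    use MOM_error_handler,  only : MOM_error, FATAL
    use MOM_file_parser,    only : param_file_type
    implicit none
    type(param_file_type), intent(in) :: param_file !< Run-time parameter handle

    type(dbcomms_CS_type) :: dbcomms_CS      ! Holds the client and its configuration
    real(kind=c_float) :: features(8,4)      ! Placeholder payload to send to the database
    integer :: db_return_code

    features(:,:) = 0.0
    ! Initialize the communication client (the dummy implementation only issues a warning)
    call database_comms_init(param_file, dbcomms_CS)
    ! Send a tensor to the database and check the return code
    ! (the dummy SR_error_parser always reports an error)
    db_return_code = dbcomms_CS%client%put_tensor("features_example", features, shape(features))
    if (dbcomms_CS%client%SR_error_parser(db_return_code)) &
      call MOM_error(FATAL, "example_db_usage: put_tensor failed")
  end subroutine example_db_usage

MOM_MEKE.F90 follows the same pattern, with per-PE key suffixes and the feature tensors assembled in ML_MEKE_calculate_features.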
Packages are now expected to provide the following implementations which are compatible with those found in config_src/external/database_comms/MOM_database_comms.F90: - dbclient_type - dbcomms_CS_type - subroutine database_comms_init --- .../MOM_database_comms.F90} | 29 ++-- config_src/external/database_comms/README.md | 23 +++ .../database_client_interface.F90} | 154 +++++++++--------- src/core/MOM.F90 | 8 +- src/parameterizations/lateral/MOM_MEKE.F90 | 37 ++--- 5 files changed, 135 insertions(+), 116 deletions(-) rename config_src/external/{dbclient/MOM_smartredis.F90 => database_comms/MOM_database_comms.F90} (57%) create mode 100644 config_src/external/database_comms/README.md rename config_src/external/{dbclient/smartredis_client.F90 => database_comms/database_client_interface.F90} (87%) diff --git a/config_src/external/dbclient/MOM_smartredis.F90 b/config_src/external/database_comms/MOM_database_comms.F90 similarity index 57% rename from config_src/external/dbclient/MOM_smartredis.F90 rename to config_src/external/database_comms/MOM_database_comms.F90 index 17175d3885..4c3eb38b5c 100644 --- a/config_src/external/dbclient/MOM_smartredis.F90 +++ b/config_src/external/database_comms/MOM_database_comms.F90 @@ -1,38 +1,37 @@ -!> Contains routines necessary to initialize the Database client -module MOM_dbclient - +!> Contains routines necessary to initialize communication with a database +module MOM_database_comms ! This file is part of MOM6. See LICENSE.md for the license. -use MOM_file_parser, only : param_file_type -use MOM_error_handler, only : MOM_error, WARNING -use smartredis_client, only : dbclient_type => client_type +use MOM_file_parser, only : param_file_type +use MOM_error_handler, only : MOM_error, WARNING +use database_client_interface, only : dbclient_type implicit none; private -!> Control structure to store Database client related parameters and objects -type, public :: dbclient_CS_type +!> Control structure to store Database communication related parameters and objects +type, public :: dbcomms_CS_type type(dbclient_type) :: client !< The Database client itself logical :: use_dbclient !< If True, use Database within MOM6 logical :: colocated !< If True, the orchestrator was setup in 'co-located' mode logical :: cluster !< If True, the orchestrator has three shards or more integer :: colocated_stride !< Sets which ranks will load the model from the file !! e.g. mod(rank,colocated_stride) == 0 -end type dbclient_CS_type +end type dbcomms_CS_type +public :: database_comms_init public :: dbclient_type -public :: dbclient_init contains -subroutine dbclient_init(param_file, CS, client_in) +subroutine database_comms_init(param_file, CS, client_in) type(param_file_type), intent(in ) :: param_file !< Parameter file structure - type(dbclient_CS_type), intent(inout) :: CS !< Control structure for Database + type(dbcomms_CS_type), intent(inout) :: CS !< Control structure for Database type(dbclient_type), optional, intent(in ) :: client_in !< If present, use a previously initialized !! Database client - call MOM_error(WARNING,"dbclient_init was compiled using the dummy module. If this was\n"//& + call MOM_error(WARNING,"dbcomms_init was compiled using the dummy module. 
If this was\n"//& "a mistake, please follow the instructions in:\n"//& "MOM6/config_src/external/dbclient/README.md") -end subroutine dbclient_init +end subroutine database_comms_init -end module MOM_dbclient +end module MOM_database_comms diff --git a/config_src/external/database_comms/README.md b/config_src/external/database_comms/README.md new file mode 100644 index 0000000000..4a406f9ebc --- /dev/null +++ b/config_src/external/database_comms/README.md @@ -0,0 +1,23 @@ +# Overview +This module is designed to be used in conjunction with the SmartSim and +SmartRedis libraries found at https://github.com/CrayLabs/. These +libraries are used to perform machine-learning inference and online +analysis using a Redis-based database. + +An earlier implementation of these routines was used in Partee et al. [2022]: +"Using Machine Learning at scale in numerical simulations with SmartSim: +An application to ocean climate modeling" (doi.org/10.1016/j.jocs.2022.101707) +to predict eddy kinetic energy for use in the MEKE module. The additional +scripts and installation instructions for compiling MOM6 for this case can +be found at: https://github.com/CrayLabs/NCAR_ML_EKE/. The substantive +code in the new implementation is part of `MOM_MEKE.F90`. + +# File description + +- `MOM_database_comms.F90` contains just method signatures and elements of the + control structure that are imported elsewhere within the primary MOM6 + code. This includes: `dbcomms_CS_type`, `dbclient_type`, and `database_comms_init` + +- `database_client_interface.F90` is a skeleton of the actual SmartRedis library + used to ensure that the interfaces to the library are maintained without + requiring MOM6 users to compile in the full library diff --git a/config_src/external/dbclient/smartredis_client.F90 b/config_src/external/database_comms/database_client_interface.F90 similarity index 87% rename from config_src/external/dbclient/smartredis_client.F90 rename to config_src/external/database_comms/database_client_interface.F90 index 9077d51577..06601c0dd9 100644 --- a/config_src/external/dbclient/smartredis_client.F90 +++ b/config_src/external/database_comms/database_client_interface.F90 @@ -1,4 +1,4 @@ -module smartredis_client +module database_client_interface ! This file is part of MOM6. See LICENSE.md for the license. use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int @@ -16,12 +16,12 @@ module smartredis_client private end type dataset_type - !> Stores all data and methods associated with the SmartRedis client that is used to communicate with the database - type, public :: client_type + !> Stores all data and methods associated with the communication client that is used to communicate with the database + type, public :: dbclient_type private logical(kind=c_bool) :: cluster = .false. !< True if a database cluster is being used - type(c_ptr) :: client_ptr = c_null_ptr !< Pointer to the initialized SmartRedisClient + type(c_ptr) :: client_ptr = c_null_ptr !< Pointer to the initialized communicationClient logical :: is_initialized = .false.
!< True if client is initialized contains @@ -35,11 +35,11 @@ module smartredis_client !> Decode a response code from an API function procedure :: SR_error_parser - !> Initializes a new instance of the SmartRedis client + !> Initializes a new instance of the communication client procedure :: initialize => initialize_client - !> Check if a SmartRedis client has been initialized + !> Check if a communication client has been initialized procedure :: isinitialized - !> Destructs a new instance of the SmartRedis client + !> Destructs a new instance of the communication client procedure :: destructor !> Check the database for the existence of a specific model procedure :: model_exists @@ -99,9 +99,9 @@ module smartredis_client procedure :: delete_model !> Remove a model from the database with multiple GPUs procedure :: delete_model_multigpu - !> Put a SmartRedis dataset into the database + !> Put a communication dataset into the database procedure :: put_dataset - !> Retrieve a SmartRedis dataset from the database + !> Retrieve a communication dataset from the database procedure :: get_dataset !> Rename the dataset within the database procedure :: rename_dataset @@ -152,23 +152,23 @@ module smartredis_client procedure, private :: unpack_tensor_float !< Unpack a 32-bit real tensor into memory procedure, private :: unpack_tensor_double !< Unpack a 64-bit real tensor into memory - end type client_type + end type dbclient_type contains !> Decode a response code from an API function function SR_error_parser(self, response_code) result(is_error) - class(client_type), intent(in) :: self !< Receives the initialized client + class(dbclient_type), intent(in) :: self !< Receives the initialized client integer (kind=enum_kind), intent(in) :: response_code !< The response code to decode logical :: is_error !< Indicates whether this is an error response is_error = .true. end function SR_error_parser - !> Initializes a new instance of a SmartRedis client + !> Initializes a new instance of a communication client function initialize_client(self, cluster) integer(kind=enum_kind) :: initialize_client - class(client_type), intent(inout) :: self !< Receives the initialized client + class(dbclient_type), intent(inout) :: self !< Receives the initialized client logical, optional, intent(in ) :: cluster !< If true, client uses a database cluster (Default: .false.) initialize_client = -1 @@ -176,21 +176,21 @@ end function initialize_client !> Check whether the client has been initialized logical function isinitialized(this) - class(client_type) :: this + class(dbclient_type) :: this isinitialized = .false. 
end function isinitialized - !> A destructor for the SmartRedis client + !> A destructor for the communication client function destructor(self) integer(kind=enum_kind) :: destructor - class(client_type), intent(inout) :: self + class(dbclient_type), intent(inout) :: self destructor = -1 end function destructor !> Check if the specified key exists in the database function key_exists(self, key, exists) - class(client_type), intent(in) :: self !< The client + class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: key !< The key to check logical(kind=c_bool), intent(out) :: exists !< Receives whether the key exists integer(kind=enum_kind) :: key_exists @@ -200,7 +200,7 @@ end function key_exists !> Check if the specified model exists in the database function model_exists(self, model_name, exists) result(code) - class(client_type), intent(in) :: self !< The client + class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: model_name !< The model to check logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists integer(kind=enum_kind) :: code @@ -210,7 +210,7 @@ end function model_exists !> Check if the specified tensor exists in the database function tensor_exists(self, tensor_name, exists) result(code) - class(client_type), intent(in) :: self !< The client + class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: tensor_name !< The tensor to check logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists integer(kind=enum_kind) :: code @@ -220,7 +220,7 @@ end function tensor_exists !> Check if the specified dataset exists in the database function dataset_exists(this, dataset_name, exists) result(code) - class(client_type), intent(in) :: this !< The client + class(dbclient_type), intent(in) :: this !< The client character(len=*), intent(in) :: dataset_name !< The dataset to check logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists integer(kind=enum_kind) :: code @@ -230,7 +230,7 @@ end function dataset_exists !> Repeatedly poll the database until the tensor exists or the number of tries is exceeded function poll_tensor(self, tensor_name, poll_frequency_ms, num_tries, exists) result(code) - class(client_type), intent(in) :: self !< The client + class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: tensor_name !< name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing @@ -243,7 +243,7 @@ end function poll_tensor !> Repeatedly poll the database until the dataset exists or the number of tries is exceeded function poll_dataset(self, dataset_name, poll_frequency_ms, num_tries, exists) integer(kind=enum_kind) :: poll_dataset - class(client_type), intent(in) :: self !< The client + class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: dataset_name !< Name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing @@ -254,7 +254,7 @@ end function poll_dataset !> Repeatedly poll the database until the model exists or the number of tries is exceeded function poll_model(self, model_name, poll_frequency_ms, num_tries, exists) result(code) - class(client_type), intent(in) :: self !< The 
client + class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: model_name !< Name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing @@ -266,7 +266,7 @@ end function poll_model !> Repeatedly poll the database until the key exists or the number of tries is exceeded function poll_key(self, key, poll_frequency_ms, num_tries, exists) result(code) - class(client_type), intent(in) :: self !< The client + class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: key !< Key in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing @@ -279,7 +279,7 @@ end function poll_key !> Put a tensor whose Fortran type is the equivalent 'int8' C-type function put_tensor_i8(self, name, data, dims) result(code) integer(kind=c_int8_t), dimension(..), target, intent(in) :: data !< Data to be sent - class(client_type), intent(in) :: self !< Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension integer(kind=enum_kind) :: code @@ -290,7 +290,7 @@ end function put_tensor_i8 !> Put a tensor whose Fortran type is the equivalent 'int16' C-type function put_tensor_i16(self, name, data, dims) result(code) integer(kind=c_int16_t), dimension(..), target, intent(in) :: data !< Data to be sent - class(client_type), intent(in) :: self !< Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension integer(kind=enum_kind) :: code @@ -301,7 +301,7 @@ end function put_tensor_i16 !> Put a tensor whose Fortran type is the equivalent 'int32' C-type function put_tensor_i32(self, name, data, dims) result(code) integer(kind=c_int32_t), dimension(..), target, intent(in) :: data !< Data to be sent - class(client_type), intent(in) :: self !< Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension integer(kind=enum_kind) :: code @@ -312,7 +312,7 @@ end function put_tensor_i32 !> Put a tensor whose Fortran type is the equivalent 'int64' C-type function put_tensor_i64(self, name, data, dims) result(code) integer(kind=c_int64_t), dimension(..), target, intent(in) :: data !< Data to be sent - class(client_type), intent(in) :: self !< Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension integer(kind=enum_kind) :: code @@ -323,7 +323,7 @@ end function put_tensor_i64 !> Put a tensor whose Fortran type is the equivalent 'float' C-type function put_tensor_float(self, name, data, dims) result(code) real(kind=c_float), dimension(..), target, intent(in) :: data !< Data to be sent - 
class(client_type), intent(in) :: self !< Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension integer(kind=enum_kind) :: code @@ -334,7 +334,7 @@ end function put_tensor_float !> Put a tensor whose Fortran type is the equivalent 'double' C-type function put_tensor_double(self, name, data, dims) result(code) real(kind=c_double), dimension(..), target, intent(in) :: data !< Data to be sent - class(client_type), intent(in) :: self !< Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension integer(kind=enum_kind) :: code @@ -345,7 +345,7 @@ end function put_tensor_double !> Put a tensor whose Fortran type is the equivalent 'int8' C-type function unpack_tensor_i8(self, name, result, dims) result(code) integer(kind=c_int8_t), dimension(..), target, intent(out) :: result !< Data to be sent - class(client_type), intent(in) :: self !< Pointer to the initialized client + class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor integer(kind=enum_kind) :: code @@ -356,7 +356,7 @@ end function unpack_tensor_i8 !> Put a tensor whose Fortran type is the equivalent 'int16' C-type function unpack_tensor_i16(self, name, result, dims) result(code) integer(kind=c_int16_t), dimension(..), target, intent(out) :: result !< Data to be sent - class(client_type), intent(in) :: self !< Pointer to the initialized client + class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor integer(kind=enum_kind) :: code @@ -367,7 +367,7 @@ end function unpack_tensor_i16 !> Put a tensor whose Fortran type is the equivalent 'int32' C-type function unpack_tensor_i32(self, name, result, dims) result(code) integer(kind=c_int32_t), dimension(..), target, intent(out) :: result !< Data to be sent - class(client_type), intent(in) :: self !< Pointer to the initialized client + class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor integer(kind=enum_kind) :: code @@ -378,7 +378,7 @@ end function unpack_tensor_i32 !> Put a tensor whose Fortran type is the equivalent 'int64' C-type function unpack_tensor_i64(self, name, result, dims) result(code) integer(kind=c_int64_t), dimension(..), target, intent(out) :: result !< Data to be sent - class(client_type), intent(in) :: self !< Pointer to the initialized client + class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor integer(kind=enum_kind) :: code @@ -389,7 +389,7 @@ end function unpack_tensor_i64 !> Put a tensor whose Fortran type is the equivalent 'float' 
C-type function unpack_tensor_float(self, name, result, dims) result(code) real(kind=c_float), dimension(..), target, intent(out) :: result !< Data to be sent - class(client_type), intent(in) :: self !< Pointer to the initialized client + class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor integer(kind=enum_kind) :: code @@ -400,7 +400,7 @@ end function unpack_tensor_float !> Put a tensor whose Fortran type is the equivalent 'double' C-type function unpack_tensor_double(self, name, result, dims) result(code) real(kind=c_double), dimension(..), target, intent(out) :: result !< Data to be sent - class(client_type), intent(in) :: self !< Pointer to the initialized client + class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor integer(kind=enum_kind) :: code @@ -410,7 +410,7 @@ end function unpack_tensor_double !> Move a tensor to a new name function rename_tensor(self, old_name, new_name) result(code) - class(client_type), intent(in) :: self !< The initialized Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< The initialized Fortran communication client character(len=*), intent(in) :: old_name !< The current name for the tensor !! excluding null terminating character character(len=*), intent(in) :: new_name !< The new tensor name @@ -421,7 +421,7 @@ end function rename_tensor !> Delete a tensor function delete_tensor(self, name) result(code) - class(client_type), intent(in) :: self !< The initialized Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< The initialized Fortran communication client character(len=*), intent(in) :: name !< The name associated with the tensor integer(kind=enum_kind) :: code @@ -430,7 +430,7 @@ end function delete_tensor !> Copy a tensor to the destination name function copy_tensor(self, src_name, dest_name) result(code) - class(client_type), intent(in) :: self !< The initialized Fortran SmartRedis client + class(dbclient_type), intent(in) :: self !< The initialized Fortran communication client character(len=*), intent(in) :: src_name !< The name associated with the tensor !! 
excluding null terminating character character(len=*), intent(in) :: dest_name !< The new tensor name @@ -441,7 +441,7 @@ end function copy_tensor !> Retrieve the model from the database function get_model(self, name, model) result(code) - class(client_type), intent(in ) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: name !< The name associated with the model character(len=*), intent( out) :: model !< The model as a continuous buffer integer(kind=enum_kind) :: code @@ -452,7 +452,7 @@ end function get_model !> Load the machine learning model from a file and set the configuration function set_model_from_file(self, name, model_file, backend, device, batch_size, min_batch_size, tag, & inputs, outputs) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), intent(in) :: model_file !< The file storing the model character(len=*), intent(in) :: backend !< The name of the backend @@ -475,7 +475,7 @@ end function set_model_from_file !> Load the machine learning model from a file and set the configuration for use in multi-GPU systems function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu, num_gpus, batch_size, & min_batch_size, tag, inputs, outputs) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), intent(in) :: model_file !< The file storing the model character(len=*), intent(in) :: backend !< The name of the backend @@ -499,7 +499,7 @@ end function set_model_from_file_multigpu !> Establish a model to run function set_model(self, name, model, backend, device, batch_size, min_batch_size, tag, & inputs, outputs) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), intent(in) :: model !< The binary representation of the model character(len=*), intent(in) :: backend !< The name of the backend (TF, TFLITE, TORCH, ONNX) @@ -518,7 +518,7 @@ end function set_model !> Set a model from a byte string to run on a system with multiple GPUs function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, batch_size, min_batch_size, tag, & inputs, outputs) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), intent(in) :: model !< The binary representation of the model character(len=*), intent(in) :: backend !< The name of the backend (TF, TFLITE, TORCH, ONNX) @@ -537,7 +537,7 @@ end function set_model_multigpu !> Run a model in the database using the specified input and output tensors function run_model(self, name, inputs, outputs) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: 
name !< The name to use to place the model character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) @@ -548,7 +548,7 @@ end function run_model !> Run a model in the database using the specified input and output tensors in a multi-GPU system function run_model_multigpu(self, name, inputs, outputs, offset, first_gpu, num_gpus) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) @@ -563,7 +563,7 @@ end function run_model_multigpu !> Remove a model from the database function delete_model(self, name) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to remove the model integer(kind=enum_kind) :: code @@ -572,7 +572,7 @@ end function delete_model !> Remove a model from the database function delete_model_multigpu(self, name, first_gpu, num_gpus) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to remove the model integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model integer, intent(in) :: num_gpus !< The number of GPUs to use with the model @@ -583,7 +583,7 @@ end function delete_model_multigpu !> Retrieve the script from the database function get_script(self, name, script) result(code) - class(client_type), intent(in ) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: name !< The name to use to place the script character(len=*), intent( out) :: script !< The script as a continuous buffer integer(kind=enum_kind) :: code @@ -593,7 +593,7 @@ end function get_script !> Set a script (from file) in the database for future execution function set_script_from_file(self, name, device, script_file) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the script character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) 
character(len=*), intent(in) :: script_file !< The file storing the script @@ -604,7 +604,7 @@ end function set_script_from_file !> Set a script (from file) in the database for future execution in a multi-GPU system function set_script_from_file_multigpu(self, name, script_file, first_gpu, num_gpus) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the script character(len=*), intent(in) :: script_file !< The file storing the script integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model @@ -616,7 +616,7 @@ end function set_script_from_file_multigpu !> Set a script (from buffer) in the database for future execution function set_script(self, name, device, script) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the script character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) character(len=*), intent(in) :: script !< The file storing the script @@ -627,7 +627,7 @@ end function set_script !> Set a script (from buffer) in the database for future execution in a multi-GPU system function set_script_multigpu(self, name, script, first_gpu, num_gpus) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the script character(len=*), intent(in) :: script !< The file storing the script integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model @@ -638,7 +638,7 @@ function set_script_multigpu(self, name, script, first_gpu, num_gpus) result(cod end function set_script_multigpu function run_script(self, name, func, inputs, outputs) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the script character(len=*), intent(in) :: func !< The name of the function in the script to call character(len=*), dimension(:), intent(in) :: inputs !< One or more names of script @@ -651,7 +651,7 @@ function run_script(self, name, func, inputs, outputs) result(code) end function run_script function run_script_multigpu(self, name, func, inputs, outputs, offset, first_gpu, num_gpus) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to place the script character(len=*), intent(in) :: func !< The name of the function in the script to call character(len=*), dimension(:), intent(in) :: inputs !< One or more names of script @@ -669,7 +669,7 @@ end function run_script_multigpu !> Remove a script from the database function delete_script(self, name) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to delete the script integer(kind=enum_kind) :: code @@ -678,7 
+678,7 @@ end function delete_script !> Remove a script_multigpu from the database function delete_script_multigpu(self, name, first_gpu, num_gpus) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to delete the script_multigpu integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model integer, intent(in) :: num_gpus !< The number of GPUs to use with the model @@ -689,7 +689,7 @@ end function delete_script_multigpu !> Store a dataset in the database function put_dataset(self, dataset) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client type(dataset_type), intent(in) :: dataset !< Dataset to store in the dataset integer(kind=enum_kind) :: code @@ -698,7 +698,7 @@ end function put_dataset !> Retrieve a dataset from the database function get_dataset(self, name, dataset) result(code) - class(client_type), intent(in ) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: name !< Name of the dataset to get type(dataset_type), intent( out) :: dataset !< receives the dataset integer(kind=enum_kind) :: code @@ -708,7 +708,7 @@ end function get_dataset !> Rename a dataset stored in the database function rename_dataset(self, name, new_name) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< Original name of the dataset character(len=*), intent(in) :: new_name !< New name of the dataset integer(kind=enum_kind) :: code @@ -718,7 +718,7 @@ end function rename_dataset !> Copy a dataset within the database to a new name function copy_dataset(self, name, new_name) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< Source name of the dataset character(len=*), intent(in) :: new_name !< Name of the new dataset integer(kind=enum_kind) :: code @@ -728,7 +728,7 @@ end function copy_dataset !> Delete a dataset stored within a database function delete_dataset(self, name) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< Name of the dataset to delete integer(kind=enum_kind) :: code @@ -737,7 +737,7 @@ end function delete_dataset !> Set the data source (i.e. name prefix for get functions) function set_data_source(self, source_id) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: source_id !< The name prefix integer(kind=enum_kind) :: code @@ -749,7 +749,7 @@ end function set_data_source !! Keys of entities created before client function is called will not be affected. By default, the client does not !! prefix model and script names. 
function use_model_ensemble_prefix(self, use_prefix) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client logical, intent(in) :: use_prefix !< The prefix setting integer(kind=enum_kind) :: code @@ -762,7 +762,7 @@ end function use_model_ensemble_prefix !! Keys of entities created before client function is called will not be affected. By default, the client prefixes !! tensor and dataset keys with the first prefix specified with the SSKEYIN and SSKEYOUT environment variables. function use_tensor_ensemble_prefix(self, use_prefix) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client logical, intent(in) :: use_prefix !< The prefix setting integer(kind=enum_kind) :: code @@ -771,7 +771,7 @@ end function use_tensor_ensemble_prefix !> Control whether aggregation lists are prefixed function use_list_ensemble_prefix(self, use_prefix) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client logical, intent(in) :: use_prefix !< The prefix setting integer(kind=enum_kind) :: code @@ -784,7 +784,7 @@ end function use_list_ensemble_prefix !! does not create a copy of the dataset. Also, for this reason, the dataset must have been previously !! placed into the database with a separate call to put_dataset(). function append_to_list(self, list_name, dataset) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: list_name !< Name of the dataset to get type(dataset_type), intent(in) :: dataset !< Dataset to append to the list integer(kind=enum_kind) :: code @@ -799,7 +799,7 @@ end function append_to_list !> Delete an aggregation list function delete_list(self, list_name) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: list_name !< Name of the aggregated dataset list to delete integer(kind=enum_kind) :: code @@ -814,7 +814,7 @@ end function delete_list !> Copy an aggregation list function copy_list(self, src_name, dest_name) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: src_name !< Name of the dataset to copy character(len=*), intent(in) :: dest_name !< The new list name integer(kind=enum_kind) :: code @@ -824,7 +824,7 @@ end function copy_list !> Rename an aggregation list function rename_list(self, src_name, dest_name) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: src_name !< Name of the dataset to rename character(len=*), intent(in) :: dest_name !< The new list name integer(kind=enum_kind) :: code @@ -834,7 +834,7 @@ end function rename_list !> Get the length of the aggregation list function get_list_length(self, list_name, result_length) result(code) - class(client_type), intent(in ) :: self !< An initialized SmartRedis client + class(dbclient_type), 
intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: list_name !< Name of the dataset to get integer, intent( out) :: result_length !< The length of the list integer(kind=enum_kind) :: code @@ -844,7 +844,7 @@ end function get_list_length !> Get the length of the aggregation list function poll_list_length(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code) - class(client_type), intent(in ) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: list_name !< Name of the dataset to get integer, intent(in ) :: list_length !< The desired length of the list integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) @@ -858,7 +858,7 @@ end function poll_list_length !> Get the length of the aggregation list function poll_list_length_gte(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code) - class(client_type), intent(in ) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: list_name !< Name of the dataset to get integer, intent(in ) :: list_length !< The desired length of the list integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) @@ -872,7 +872,7 @@ end function poll_list_length_gte !> Get the length of the aggregation list function poll_list_length_lte(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: list_name !< Name of the dataset to get integer, intent(in) :: list_length !< The desired length of the list integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) @@ -890,7 +890,7 @@ end function poll_list_length_lte !! extra query to the database to get the list length. This is for now necessary because !! difficulties in allocating memory for Fortran alloctables from within C. function get_datasets_from_list(self, list_name, datasets, num_datasets) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: list_name !< Name of the dataset to get type(dataset_type), dimension(:), allocatable, intent( out) :: datasets !< The array of datasets included integer(kind=enum_kind) :: code @@ -902,7 +902,7 @@ end function get_datasets_from_list !> Get datasets from an aggregation list over a given range by index. Note that this will deallocate an existing list function get_datasets_from_list_range(self, list_name, start_index, end_index, datasets) result(code) - class(client_type), intent(in) :: self !< An initialized SmartRedis client + class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: list_name !< Name of the dataset to get integer, intent(in) :: start_index !< The starting index of the range (inclusive, !! starting at zero). 
Negative values are @@ -922,5 +922,5 @@ function get_datasets_from_list_range(self, list_name, start_index, end_index, d code = -1 end function get_datasets_from_list_range - end module smartredis_client + end module database_client_interface diff --git a/src/core/MOM.F90 b/src/core/MOM.F90 index ebb6f9f48b..78170064ff 100644 --- a/src/core/MOM.F90 +++ b/src/core/MOM.F90 @@ -144,7 +144,7 @@ module MOM use MOM_porous_barriers, only : porous_widths ! Database client used for machine-learning interface -use MOM_dbclient, only : dbclient_CS_type, dbclient_init, dbclient_type +use MOM_database_comms, only : dbcomms_CS_type, database_comms_init, dbclient_type ! ODA modules use MOM_oda_driver_mod, only : ODA_CS, oda, init_oda, oda_end @@ -410,7 +410,7 @@ module MOM type(ODA_CS), pointer :: odaCS => NULL() !< a pointer to the control structure for handling !! ensemble model state vectors and data assimilation !! increments and priors - type(dbclient_CS_type) :: dbclient_CS !< Control structure for database client used for online ML/AI + type(dbcomms_CS_type) :: dbcomms_CS !< Control structure for database client used for online ML/AI type(porous_barrier_ptrs) :: pbv !< porous barrier fractional cell metrics real ALLOCABLE_, dimension(NIMEMB_PTR_,NJMEM_,NKMEM_) :: por_face_areaU !< fractional open area of U-faces [nondim] real ALLOCABLE_, dimension(NIMEM_,NJMEMB_PTR_,NKMEM_) :: por_face_areaV !< fractional open area of V-faces [nondim] @@ -2812,8 +2812,8 @@ subroutine initialize_MOM(Time, Time_init, param_file, dirs, CS, restart_CSp, & endif call cpu_clock_end(id_clock_MOM_init) - if (CS%use_dbclient) call dbclient_init(param_file, CS%dbclient_CS) - CS%useMEKE = MEKE_init(Time, G, US, param_file, diag, CS%dbclient_CS, CS%MEKE_CSp, CS%MEKE, & + if (CS%use_dbclient) call database_comms_init(param_file, CS%dbcomms_CS) + CS%useMEKE = MEKE_init(Time, G, US, param_file, diag, CS%dbcomms_CS, CS%MEKE_CSp, CS%MEKE, & restart_CSp, CS%MEKE_in_dynamics) call VarMix_init(Time, G, GV, US, param_file, diag, CS%VarMix) diff --git a/src/parameterizations/lateral/MOM_MEKE.F90 b/src/parameterizations/lateral/MOM_MEKE.F90 index 01bc878da2..9b024e62b0 100644 --- a/src/parameterizations/lateral/MOM_MEKE.F90 +++ b/src/parameterizations/lateral/MOM_MEKE.F90 @@ -4,10 +4,10 @@ module MOM_MEKE ! This file is part of MOM6. See LICENSE.md for the license. -use iso_c_binding, only : c_float +use iso_fortran_env, only : real32 use MOM_coms, only : PE_here -use MOM_dbclient, only : dbclient_type, dbclient_CS_type +use MOM_database_comms, only : dbclient_type, dbcomms_CS_type use MOM_debugging, only : hchksum, uvchksum use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end, CLOCK_ROUTINE use MOM_diag_mediator, only : post_data, register_diag_field, safe_alloc_ptr @@ -38,13 +38,6 @@ module MOM_MEKE public step_forward_MEKE, MEKE_init, MEKE_alloc_register_restart, MEKE_end -!> Private enum to define the source of the EKE used in MEKE -enum, bind(c) - enumerator :: EKE_PROG !< Use prognostic equation to calcualte EKE - enumerator :: EKE_FILE !< Read in EKE from a file - enumerator :: EKE_DBCLIENT !< Infer EKE using a neural network -end enum - ! 
Constants for this module integer, parameter :: NUM_FEATURES = 4 !< How many features used to predict EKE integer, parameter :: MKE_IDX = 1 !< Index of mean kinetic energy in the feature array @@ -52,6 +45,10 @@ module MOM_MEKE integer, parameter :: RV_IDX = 3 !< Index of surface relative vorticity in the feature array integer, parameter :: RD_DX_Z_IDX = 4 !< Index of the radius of deformation over the grid size in the feature array +integer, parameter :: EKE_PROG = 1 !< Use prognostic equation to calcualte EKE +integer, parameter :: EKE_FILE = 2 !< Read in EKE from a file +integer, parameter :: EKE_DBCLIENT = 3 !< Infer EKE using a neural network + !> Control structure that contains MEKE parameters and diagnostics handles type, public :: MEKE_CS ; private logical :: initialized = .false. !< True if this control structure has been initialized. @@ -226,7 +223,7 @@ subroutine step_forward_MEKE(MEKE, h, SN_u, SN_v, visc, dt, G, GV, US, CS, hu, h real :: sdt_damp ! dt for damping [T ~> s] (sdt could be split). logical :: use_drag_rate ! Flag to indicate drag_rate is finite integer :: i, j, k, is, ie, js, je, Isq, Ieq, Jsq, Jeq, nz - real(kind=c_float), dimension(size(MEKE%MEKE),NUM_FEATURES) :: features_array + real(kind=real32), dimension(size(MEKE%MEKE),NUM_FEATURES) :: features_array is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec ; nz = GV%ke Isq = G%IscB ; Ieq = G%IecB ; Jsq = G%JscB ; Jeq = G%JecB @@ -1087,12 +1084,12 @@ end subroutine MEKE_lengthScales_0d !> Initializes the MOM_MEKE module and reads parameters. !! Returns True if module is to be used, otherwise returns False. -logical function MEKE_init(Time, G, US, param_file, diag, dbclient_CS, CS, MEKE, restart_CS, meke_in_dynamics) +logical function MEKE_init(Time, G, US, param_file, diag, dbcomms_CS, CS, MEKE, restart_CS, meke_in_dynamics) type(time_type), intent(in) :: Time !< The current model time. type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(param_file_type), intent(in) :: param_file !< Parameter file parser structure. - type(dbclient_CS_type), intent(in) :: dbclient_CS !< client + type(dbcomms_CS_type), intent(in) :: dbcomms_CS !< Database communications control structure type(diag_ctrl), target, intent(inout) :: diag !< Diagnostics structure. type(MEKE_CS), intent(inout) :: CS !< MEKE control structure. type(MEKE_type), intent(inout) :: MEKE !< MEKE fields @@ -1228,7 +1225,7 @@ logical function MEKE_init(Time, G, US, param_file, diag, dbclient_CS, CS, MEKE, units="nondim", default=1.0) case("dbclient") CS%eke_src = EKE_DBCLIENT - call ML_MEKE_init(diag, G, US, Time, param_file, dbclient_CS, CS) + call ML_MEKE_init(diag, G, US, Time, param_file, dbcomms_CS, CS) case default call MOM_error(FATAL, "Invalid method selected for calculating EKE") end select @@ -1491,13 +1488,13 @@ logical function MEKE_init(Time, G, US, param_file, diag, dbclient_CS, CS, MEKE, end function MEKE_init !> Initializer for the variant of MEKE that uses ML to predict eddy kinetic energy -subroutine ML_MEKE_init(diag, G, US, Time, param_file, dbclient_CS, CS) +subroutine ML_MEKE_init(diag, G, US, Time, param_file, dbcomms_CS, CS) type(diag_ctrl), target, intent(inout) :: diag !< Diagnostics structure. type(ocean_grid_type), intent(inout) :: G !< The ocean's grid structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(time_type), intent(in) :: Time !< The current model time. 
type(param_file_type), intent(in) :: param_file !< Parameter file parser structure. - type(dbclient_CS_type), intent(in) :: dbclient_CS !< Control structure for the database client + type(dbcomms_CS_type), intent(in) :: dbcomms_CS !< Control structure for database communication type(MEKE_CS), intent(inout) :: CS !< Control structure for this module character(len=200) :: inputdir, backend, model_filename @@ -1528,8 +1525,8 @@ subroutine ML_MEKE_init(diag, G, US, Time, param_file, dbclient_CS, CS) "Maximum value of EKE allowed when inferring EKE", default=2., scale=US%L_T_to_m_s**2) ! Set the machine learning model - if (dbclient_CS%colocated) then - if (modulo(PE_here(),dbclient_CS%colocated_stride) == 0) then + if (dbcomms_CS%colocated) then + if (modulo(PE_here(),dbcomms_CS%colocated_stride) == 0) then db_return_code = CS%client%set_model_from_file(CS%model_key, trim(inputdir)//trim(model_filename), & "TORCH", backend, batch_size=batch_size) endif @@ -1579,7 +1576,7 @@ subroutine ML_MEKE_calculate_features(G, GV, US, CS, Rd_dx_h, u, v, tv, h, dt, f type(thermo_var_ptrs), intent(in) :: tv !< Type containing thermodynamic variables real, dimension(SZI_(G),SZJ_(G),SZK_(GV)), intent(in) :: h !< Layer thickness [H ~> m or kg m-2]. real, intent(in) :: dt !< Model(baroclinic) time-step [T ~> s]. - real(kind=c_float), dimension(SIZE(h),num_features), intent( out) :: features_array + real(kind=real32), dimension(SIZE(h),num_features), intent( out) :: features_array !< The array of features needed for machine !! learning inference @@ -1687,14 +1684,14 @@ subroutine predict_MEKE(G, CS, npts, Time, features_array, MEKE) integer, intent(in ) :: npts !< Number of T-grid cells on the local !! domain type(time_type), intent(in ) :: Time !< The current model time - real(kind=c_float), dimension(npts,num_features), intent(in ) :: features_array + real(kind=real32), dimension(npts,num_features), intent(in ) :: features_array !< The array of features needed for machine !! learning inference real, dimension(SZI_(G),SZJ_(G)), intent( out) :: MEKE !< Eddy kinetic energy [L2 T-2 ~> m2 s-2] integer :: db_return_code character(len=255), dimension(1) :: model_out, model_in character(len=255) :: time_suffix - real(kind=c_float), dimension(SIZE(MEKE)) :: MEKE_vec + real(kind=real32), dimension(SIZE(MEKE)) :: MEKE_vec integer :: i, j, is, ie, js, je is = G%isc ; ie = G%iec ; js = G%jsc ; je = G%jec From 18412ed54718c94ab6b9af90b3da264117b3d6e7 Mon Sep 17 00:00:00 2001 From: Andrew Shao Date: Tue, 2 Aug 2022 19:28:45 -0500 Subject: [PATCH 37/40] Remove implicit references to iso_c_binding The stub code for database_client_interface still contained references to iso_c_binding. This removes mention from it there instead requiring that various types are part of iso_fortran_env. Implementations of the methods and types by specific packages must comply with these specific types through a wrapper or directly. --- config_src/external/database_comms/README.md | 4 +- .../database_client_interface.F90 | 194 ++++++++---------- 2 files changed, 89 insertions(+), 109 deletions(-) diff --git a/config_src/external/database_comms/README.md b/config_src/external/database_comms/README.md index 4a406f9ebc..53a06b0a4a 100644 --- a/config_src/external/database_comms/README.md +++ b/config_src/external/database_comms/README.md @@ -14,9 +14,9 @@ code in the new implementation is part of `MOM_MEKE.F90`. 
# File description -- `MOM_smartredis.F90` contains just method signatures and elements of the +- `MOM_database_comms` contains just method signatures and elements of the control structure that are imported elsewhere within the primary MOM6 - code. This includes: `dbclient_CS_type`, `dbclient_type`, and `dbclient_init` + code. This includes: `dbcomms_CS_type`, `dbclient_type`, and `database_comms_init` - `smartredis_client.F90` is a skeleton of the actual SmartRedis library used to ensure that the interfaces to the library are maintained without diff --git a/config_src/external/database_comms/database_client_interface.F90 b/config_src/external/database_comms/database_client_interface.F90 index 06601c0dd9..f6a8f9ed1f 100644 --- a/config_src/external/database_comms/database_client_interface.F90 +++ b/config_src/external/database_comms/database_client_interface.F90 @@ -1,16 +1,10 @@ module database_client_interface ! This file is part of MOM6. See LICENSE.md for the license. - use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int - use iso_c_binding, only : c_int8_t, c_int16_t, c_int32_t, c_int64_t, c_float, c_double, c_size_t - use iso_c_binding, only : c_loc, c_f_pointer - - use, intrinsic :: iso_fortran_env, only: stderr => error_unit + use iso_fortran_env, only : int8, int16, int32, int64, real32, real64 implicit none; private - integer, parameter, public :: enum_kind = c_int - !> Dummy type for dataset type, public :: dataset_type private @@ -20,9 +14,6 @@ module database_client_interface type, public :: dbclient_type private - logical(kind=c_bool) :: cluster = .false. !< True if a database cluster is being used - type(c_ptr) :: client_ptr = c_null_ptr !< Pointer to the initialized communicationClient - logical :: is_initialized = .false. !< True if client is initialized contains ! Public procedures @@ -159,7 +150,7 @@ module database_client_interface !> Decode a response code from an API function function SR_error_parser(self, response_code) result(is_error) class(dbclient_type), intent(in) :: self !< Receives the initialized client - integer (kind=enum_kind), intent(in) :: response_code !< The response code to decode + integer, intent(in) :: response_code !< The response code to decode logical :: is_error !< Indicates whether this is an error response is_error = .true. @@ -167,7 +158,7 @@ end function SR_error_parser !> Initializes a new instance of a communication client function initialize_client(self, cluster) - integer(kind=enum_kind) :: initialize_client + integer :: initialize_client class(dbclient_type), intent(inout) :: self !< Receives the initialized client logical, optional, intent(in ) :: cluster !< If true, client uses a database cluster (Default: .false.) 
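The README above notes that MOM_database_comms carries only the method signatures and control-structure elements (dbcomms_CS_type, dbclient_type, database_comms_init) that the rest of MOM6 imports, and the commit message states that packages are expected to provide compatible implementations of all three. What follows is a minimal sketch of such a drop-in module, not part of this patch series: the layout of dbcomms_CS_type and the client%initialize call simply mirror the stub interface shown here, and are assumptions about how a concrete package might wire things up rather than any particular library's API.

module MOM_database_comms
! Hypothetical package-provided replacement for the dummy module; a sketch only.
use MOM_file_parser,           only : param_file_type
use MOM_error_handler,         only : MOM_error, FATAL
use database_client_interface, only : dbclient_type

implicit none ; private

!> Control structure holding the client and the communication options
type, public :: dbcomms_CS_type
  type(dbclient_type) :: client      !< The database client itself
  logical :: use_dbclient = .false.  !< If True, use the database within MOM6
  logical :: cluster = .false.       !< If True, the database has three shards or more
end type dbcomms_CS_type

public :: database_comms_init
public :: dbclient_type

contains

!> Initialize the connection to the database, or adopt a previously initialized client.
subroutine database_comms_init(param_file, CS, client_in)
  type(param_file_type),         intent(in   ) :: param_file !< Parameter file structure
  type(dbcomms_CS_type),         intent(inout) :: CS         !< Control structure for the database
  type(dbclient_type), optional, intent(in   ) :: client_in  !< If present, use this client as-is

  if (present(client_in)) then
    CS%client = client_in
  elseif (CS%client%initialize(CS%cluster) /= 0) then
    call MOM_error(FATAL, "database_comms_init: unable to initialize the database client.")
  endif
end subroutine database_comms_init

end module MOM_database_comms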
@@ -182,7 +173,7 @@ end function isinitialized !> A destructor for the communication client function destructor(self) - integer(kind=enum_kind) :: destructor + integer :: destructor class(dbclient_type), intent(inout) :: self destructor = -1 @@ -192,8 +183,8 @@ end function destructor function key_exists(self, key, exists) class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: key !< The key to check - logical(kind=c_bool), intent(out) :: exists !< Receives whether the key exists - integer(kind=enum_kind) :: key_exists + logical, intent(out) :: exists !< Receives whether the key exists + integer :: key_exists key_exists = -1 end function key_exists @@ -202,8 +193,8 @@ end function key_exists function model_exists(self, model_name, exists) result(code) class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: model_name !< The model to check - logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists - integer(kind=enum_kind) :: code + logical, intent(out) :: exists !< Receives whether the model exists + integer :: code code = -1 end function model_exists @@ -212,8 +203,8 @@ end function model_exists function tensor_exists(self, tensor_name, exists) result(code) class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: tensor_name !< The tensor to check - logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists - integer(kind=enum_kind) :: code + logical, intent(out) :: exists !< Receives whether the model exists + integer :: code code = -1 end function tensor_exists @@ -222,8 +213,8 @@ end function tensor_exists function dataset_exists(this, dataset_name, exists) result(code) class(dbclient_type), intent(in) :: this !< The client character(len=*), intent(in) :: dataset_name !< The dataset to check - logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists - integer(kind=enum_kind) :: code + logical, intent(out) :: exists !< Receives whether the model exists + integer :: code code = -1 end function dataset_exists @@ -234,20 +225,20 @@ function poll_tensor(self, tensor_name, poll_frequency_ms, num_tries, exists) re character(len=*), intent(in) :: tensor_name !< name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: exists !< Receives whether the tensor exists - integer(kind=enum_kind) :: code + logical, intent(out) :: exists !< Receives whether the tensor exists + integer :: code code = -1 end function poll_tensor !> Repeatedly poll the database until the dataset exists or the number of tries is exceeded function poll_dataset(self, dataset_name, poll_frequency_ms, num_tries, exists) - integer(kind=enum_kind) :: poll_dataset + integer :: poll_dataset class(dbclient_type), intent(in) :: self !< The client character(len=*), intent(in) :: dataset_name !< Name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: exists !< Receives whether the tensor exists + logical, intent(out) :: exists !< Receives whether the tensor exists poll_dataset = -1 end function poll_dataset @@ -258,8 +249,8 @@ function poll_model(self, model_name, poll_frequency_ms, num_tries, exists) resu 
character(len=*), intent(in) :: model_name !< Name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists - integer(kind=enum_kind) :: code + logical, intent(out) :: exists !< Receives whether the model exists + integer :: code code = -1 end function poll_model @@ -270,140 +261,140 @@ function poll_key(self, key, poll_frequency_ms, num_tries, exists) result(code) character(len=*), intent(in) :: key !< Key in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: exists !< Receives whether the key exists - integer(kind=enum_kind) :: code + logical, intent(out) :: exists !< Receives whether the key exists + integer :: code code = -1 end function poll_key !> Put a tensor whose Fortran type is the equivalent 'int8' C-type function put_tensor_i8(self, name, data, dims) result(code) - integer(kind=c_int8_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=int8), dimension(..), target, intent(in) :: data !< Data to be sent class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension - integer(kind=enum_kind) :: code + integer :: code code = -1 end function put_tensor_i8 !> Put a tensor whose Fortran type is the equivalent 'int16' C-type function put_tensor_i16(self, name, data, dims) result(code) - integer(kind=c_int16_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=int16), dimension(..), target, intent(in) :: data !< Data to be sent class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension - integer(kind=enum_kind) :: code + integer :: code code = -1 end function put_tensor_i16 !> Put a tensor whose Fortran type is the equivalent 'int32' C-type function put_tensor_i32(self, name, data, dims) result(code) - integer(kind=c_int32_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=int32), dimension(..), target, intent(in) :: data !< Data to be sent class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension - integer(kind=enum_kind) :: code + integer :: code code = -1 end function put_tensor_i32 !> Put a tensor whose Fortran type is the equivalent 'int64' C-type function put_tensor_i64(self, name, data, dims) result(code) - integer(kind=c_int64_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=int64), dimension(..), target, intent(in) :: data !< Data to be sent class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension - integer(kind=enum_kind) :: code + integer :: code code = -1 end function 
put_tensor_i64 !> Put a tensor whose Fortran type is the equivalent 'float' C-type function put_tensor_float(self, name, data, dims) result(code) - real(kind=c_float), dimension(..), target, intent(in) :: data !< Data to be sent + real(kind=real32), dimension(..), target, intent(in) :: data !< Data to be sent class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension - integer(kind=enum_kind) :: code + integer :: code code = -1 end function put_tensor_float !> Put a tensor whose Fortran type is the equivalent 'double' C-type function put_tensor_double(self, name, data, dims) result(code) - real(kind=c_double), dimension(..), target, intent(in) :: data !< Data to be sent + real(kind=real64), dimension(..), target, intent(in) :: data !< Data to be sent class(dbclient_type), intent(in) :: self !< Fortran communication client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension - integer(kind=enum_kind) :: code + integer :: code code = -1 end function put_tensor_double !> Put a tensor whose Fortran type is the equivalent 'int8' C-type function unpack_tensor_i8(self, name, result, dims) result(code) - integer(kind=c_int8_t), dimension(..), target, intent(out) :: result !< Data to be sent + integer(kind=int8), dimension(..), target, intent(out) :: result !< Data to be sent class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer(kind=enum_kind) :: code + integer :: code code = -1 end function unpack_tensor_i8 !> Put a tensor whose Fortran type is the equivalent 'int16' C-type function unpack_tensor_i16(self, name, result, dims) result(code) - integer(kind=c_int16_t), dimension(..), target, intent(out) :: result !< Data to be sent + integer(kind=int16), dimension(..), target, intent(out) :: result !< Data to be sent class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer(kind=enum_kind) :: code + integer :: code code = -1 end function unpack_tensor_i16 !> Put a tensor whose Fortran type is the equivalent 'int32' C-type function unpack_tensor_i32(self, name, result, dims) result(code) - integer(kind=c_int32_t), dimension(..), target, intent(out) :: result !< Data to be sent + integer(kind=int32), dimension(..), target, intent(out) :: result !< Data to be sent class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer(kind=enum_kind) :: code + integer :: code code = -1 end function unpack_tensor_i32 !> Put a tensor whose Fortran type is the equivalent 'int64' C-type function unpack_tensor_i64(self, name, result, dims) result(code) - integer(kind=c_int64_t), dimension(..), target, intent(out) :: result !< Data to be sent + integer(kind=int64), dimension(..), target, intent(out) :: result !< Data to be sent class(dbclient_type), intent(in) :: self !< 
Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer(kind=enum_kind) :: code + integer :: code code = -1 end function unpack_tensor_i64 !> Put a tensor whose Fortran type is the equivalent 'float' C-type function unpack_tensor_float(self, name, result, dims) result(code) - real(kind=c_float), dimension(..), target, intent(out) :: result !< Data to be sent + real(kind=real32), dimension(..), target, intent(out) :: result !< Data to be sent class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer(kind=enum_kind) :: code + integer :: code code = -1 end function unpack_tensor_float !> Put a tensor whose Fortran type is the equivalent 'double' C-type function unpack_tensor_double(self, name, result, dims) result(code) - real(kind=c_double), dimension(..), target, intent(out) :: result !< Data to be sent + real(kind=real64), dimension(..), target, intent(out) :: result !< Data to be sent class(dbclient_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer(kind=enum_kind) :: code + integer :: code code = -1 end function unpack_tensor_double @@ -414,7 +405,7 @@ function rename_tensor(self, old_name, new_name) result(code) character(len=*), intent(in) :: old_name !< The current name for the tensor !! excluding null terminating character character(len=*), intent(in) :: new_name !< The new tensor name - integer(kind=enum_kind) :: code + integer :: code code = -1 end function rename_tensor @@ -423,7 +414,7 @@ end function rename_tensor function delete_tensor(self, name) result(code) class(dbclient_type), intent(in) :: self !< The initialized Fortran communication client character(len=*), intent(in) :: name !< The name associated with the tensor - integer(kind=enum_kind) :: code + integer :: code code = -1 end function delete_tensor @@ -434,7 +425,7 @@ function copy_tensor(self, src_name, dest_name) result(code) character(len=*), intent(in) :: src_name !< The name associated with the tensor !! excluding null terminating character character(len=*), intent(in) :: dest_name !< The new tensor name - integer(kind=enum_kind) :: code + integer :: code code = -1 end function copy_tensor @@ -444,7 +435,7 @@ function get_model(self, name, model) result(code) class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: name !< The name associated with the model character(len=*), intent( out) :: model !< The model as a continuous buffer - integer(kind=enum_kind) :: code + integer :: code code = -1 end function get_model @@ -467,7 +458,7 @@ function set_model_from_file(self, name, model_file, backend, device, batch_size !! input nodes (TF models) character(len=*), dimension(:), optional, intent(in) :: outputs !< One or more names of model !! output nodes (TF models) - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_model_from_file @@ -491,7 +482,7 @@ function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu !! 
input nodes (TF models) character(len=*), dimension(:), optional, intent(in) :: outputs !< One or more names of model !! output nodes (TF models) - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_model_from_file_multigpu @@ -510,7 +501,7 @@ function set_model(self, name, model, backend, device, batch_size, min_batch_siz !! information purposes character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_model @@ -530,7 +521,7 @@ function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, bat !! information purposes character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_model_multigpu @@ -541,7 +532,7 @@ function run_model(self, name, inputs, outputs) result(code) character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) - integer(kind=enum_kind) :: code + integer :: code code = -1 end function run_model @@ -556,7 +547,7 @@ function run_model_multigpu(self, name, inputs, outputs, offset, first_gpu, num_ !! or MPI rank integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model integer, intent(in) :: num_gpus !< The number of GPUs to use with the model - integer(kind=enum_kind) :: code + integer :: code code = -1 end function run_model_multigpu @@ -565,7 +556,7 @@ end function run_model_multigpu function delete_model(self, name) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to remove the model - integer(kind=enum_kind) :: code + integer :: code code = -1 end function delete_model @@ -576,7 +567,7 @@ function delete_model_multigpu(self, name, first_gpu, num_gpus) result(code) character(len=*), intent(in) :: name !< The name to use to remove the model integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model integer, intent(in) :: num_gpus !< The number of GPUs to use with the model - integer(kind=enum_kind) :: code + integer :: code code = -1 end function delete_model_multigpu @@ -586,7 +577,7 @@ function get_script(self, name, script) result(code) class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: name !< The name to use to place the script character(len=*), intent( out) :: script !< The script as a continuous buffer - integer(kind=enum_kind) :: code + integer :: code code = -1 end function get_script @@ -597,7 +588,7 @@ function set_script_from_file(self, name, device, script_file) result(code) character(len=*), intent(in) :: name !< The name to use to place the script character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) 
character(len=*), intent(in) :: script_file !< The file storing the script - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_script_from_file @@ -609,7 +600,7 @@ function set_script_from_file_multigpu(self, name, script_file, first_gpu, num_g character(len=*), intent(in) :: script_file !< The file storing the script integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model integer, intent(in) :: num_gpus !< The number of GPUs to use with the model - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_script_from_file_multigpu @@ -620,7 +611,7 @@ function set_script(self, name, device, script) result(code) character(len=*), intent(in) :: name !< The name to use to place the script character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) character(len=*), intent(in) :: script !< The file storing the script - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_script @@ -632,7 +623,7 @@ function set_script_multigpu(self, name, script, first_gpu, num_gpus) result(cod character(len=*), intent(in) :: script !< The file storing the script integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model integer, intent(in) :: num_gpus !< The number of GPUs to use with the model - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_script_multigpu @@ -645,7 +636,7 @@ function run_script(self, name, func, inputs, outputs) result(code) !! input nodes (TF scripts) character(len=*), dimension(:), intent(in) :: outputs !< One or more names of script !! output nodes (TF scripts) - integer(kind=enum_kind) :: code + integer :: code code = -1 end function run_script @@ -662,7 +653,7 @@ function run_script_multigpu(self, name, func, inputs, outputs, offset, first_gp !! 
or MPI rank integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model integer, intent(in) :: num_gpus !< The number of GPUs to use with the model - integer(kind=enum_kind) :: code + integer :: code code = -1 end function run_script_multigpu @@ -671,7 +662,7 @@ end function run_script_multigpu function delete_script(self, name) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< The name to use to delete the script - integer(kind=enum_kind) :: code + integer :: code code = -1 end function delete_script @@ -682,7 +673,7 @@ function delete_script_multigpu(self, name, first_gpu, num_gpus) result(code) character(len=*), intent(in) :: name !< The name to use to delete the script_multigpu integer, intent(in) :: first_gpu !< The first GPU (zero-based) to use with the model integer, intent(in) :: num_gpus !< The number of GPUs to use with the model - integer(kind=enum_kind) :: code + integer :: code code = -1 end function delete_script_multigpu @@ -691,7 +682,7 @@ end function delete_script_multigpu function put_dataset(self, dataset) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client type(dataset_type), intent(in) :: dataset !< Dataset to store in the dataset - integer(kind=enum_kind) :: code + integer :: code code = -1 end function put_dataset @@ -701,7 +692,7 @@ function get_dataset(self, name, dataset) result(code) class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: name !< Name of the dataset to get type(dataset_type), intent( out) :: dataset !< receives the dataset - integer(kind=enum_kind) :: code + integer :: code code = -1 end function get_dataset @@ -711,7 +702,7 @@ function rename_dataset(self, name, new_name) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< Original name of the dataset character(len=*), intent(in) :: new_name !< New name of the dataset - integer(kind=enum_kind) :: code + integer :: code code = -1 end function rename_dataset @@ -721,7 +712,7 @@ function copy_dataset(self, name, new_name) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< Source name of the dataset character(len=*), intent(in) :: new_name !< Name of the new dataset - integer(kind=enum_kind) :: code + integer :: code code = -1 end function copy_dataset @@ -730,7 +721,7 @@ end function copy_dataset function delete_dataset(self, name) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: name !< Name of the dataset to delete - integer(kind=enum_kind) :: code + integer :: code code = -1 end function delete_dataset @@ -739,7 +730,7 @@ end function delete_dataset function set_data_source(self, source_id) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: source_id !< The name prefix - integer(kind=enum_kind) :: code + integer :: code code = -1 end function set_data_source @@ -751,7 +742,7 @@ end function set_data_source function use_model_ensemble_prefix(self, use_prefix) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client logical, intent(in) :: use_prefix !< The prefix setting - integer(kind=enum_kind) :: code + integer :: code code = -1 end function 
use_model_ensemble_prefix @@ -764,7 +755,7 @@ end function use_model_ensemble_prefix function use_tensor_ensemble_prefix(self, use_prefix) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client logical, intent(in) :: use_prefix !< The prefix setting - integer(kind=enum_kind) :: code + integer :: code code = -1 end function use_tensor_ensemble_prefix @@ -773,7 +764,7 @@ end function use_tensor_ensemble_prefix function use_list_ensemble_prefix(self, use_prefix) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client logical, intent(in) :: use_prefix !< The prefix setting - integer(kind=enum_kind) :: code + integer :: code code = -1 end function use_list_ensemble_prefix @@ -787,13 +778,8 @@ function append_to_list(self, list_name, dataset) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: list_name !< Name of the dataset to get type(dataset_type), intent(in) :: dataset !< Dataset to append to the list - integer(kind=enum_kind) :: code - - integer(kind=c_size_t) :: list_name_length - character(kind=c_char,len=len_trim(list_name)) :: list_name_c + integer :: code - list_name_c = trim(list_name) - list_name_length = len_trim(list_name) code = -1 end function append_to_list @@ -801,13 +787,7 @@ end function append_to_list function delete_list(self, list_name) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: list_name !< Name of the aggregated dataset list to delete - integer(kind=enum_kind) :: code - - integer(kind=c_size_t) :: list_name_length - character(kind=c_char,len=len_trim(list_name)) :: list_name_c - - list_name_c = trim(list_name) - list_name_length = len_trim(list_name) + integer :: code code = -1 end function delete_list @@ -817,7 +797,7 @@ function copy_list(self, src_name, dest_name) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: src_name !< Name of the dataset to copy character(len=*), intent(in) :: dest_name !< The new list name - integer(kind=enum_kind) :: code + integer :: code code = -1 end function copy_list @@ -827,7 +807,7 @@ function rename_list(self, src_name, dest_name) result(code) class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: src_name !< Name of the dataset to rename character(len=*), intent(in) :: dest_name !< The new list name - integer(kind=enum_kind) :: code + integer :: code code = -1 end function rename_list @@ -837,7 +817,7 @@ function get_list_length(self, list_name, result_length) result(code) class(dbclient_type), intent(in ) :: self !< An initialized communication client character(len=*), intent(in ) :: list_name !< Name of the dataset to get integer, intent( out) :: result_length !< The length of the list - integer(kind=enum_kind) :: code + integer :: code code = -1 end function get_list_length @@ -849,9 +829,9 @@ function poll_list_length(self, list_name, list_length, poll_frequency_ms, num_t integer, intent(in ) :: list_length !< The desired length of the list integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in ) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent( out) :: poll_result !< True if the list is the requested length, + logical, intent( out) :: poll_result !< True if the list is 
the requested length, !! False if not after num_tries. - integer(kind=enum_kind) :: code + integer :: code code = -1 end function poll_list_length @@ -863,9 +843,9 @@ function poll_list_length_gte(self, list_name, list_length, poll_frequency_ms, n integer, intent(in ) :: list_length !< The desired length of the list integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in ) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent( out) :: poll_result !< True if the list is the requested length, + logical, intent( out) :: poll_result !< True if the list is the requested length, !! False if not after num_tries. - integer(kind=enum_kind) :: code + integer :: code code = -1 end function poll_list_length_gte @@ -877,10 +857,10 @@ function poll_list_length_lte(self, list_name, list_length, poll_frequency_ms, n integer, intent(in) :: list_length !< The desired length of the list integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent( out) :: poll_result !< True if the list is the requested length, + logical, intent( out) :: poll_result !< True if the list is the requested length, !! False if not after num_tries. - integer(kind=enum_kind) :: code + integer :: code code = -1 end function poll_list_length_lte @@ -893,7 +873,7 @@ function get_datasets_from_list(self, list_name, datasets, num_datasets) result( class(dbclient_type), intent(in) :: self !< An initialized communication client character(len=*), intent(in) :: list_name !< Name of the dataset to get type(dataset_type), dimension(:), allocatable, intent( out) :: datasets !< The array of datasets included - integer(kind=enum_kind) :: code + integer :: code !! in the list integer, intent(out) :: num_datasets !< The numbr of datasets returned @@ -916,7 +896,7 @@ function get_datasets_from_list_range(self, list_name, start_index, end_index, d !! the last element of the list. type(dataset_type), dimension(:), allocatable, intent( out) :: datasets !< The array of datasets included - integer(kind=enum_kind) :: code + integer :: code !! in the list code = -1 From ef74015f3329e95654d80755712603307a2b2160 Mon Sep 17 00:00:00 2001 From: Andrew Shao Date: Wed, 3 Aug 2022 18:56:40 -0500 Subject: [PATCH 38/40] Fix documentation of database client stub Doxygen is wrongly flagging that the dummy methods in the database client stub are undocumented. This may have been related to a different change during the previous refactor of this stub. The solution for now is to simply move the docstring to precede the function. --- .../database_client_interface.F90 | 40 ++++++++++++------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/config_src/external/database_comms/database_client_interface.F90 b/config_src/external/database_comms/database_client_interface.F90 index f6a8f9ed1f..8d4abacae9 100644 --- a/config_src/external/database_comms/database_client_interface.F90 +++ b/config_src/external/database_comms/database_client_interface.F90 @@ -107,7 +107,7 @@ module database_client_interface procedure :: use_model_ensemble_prefix !> If true, preprend the ensemble id for dataset list-related keys procedure :: use_list_ensemble_prefix - !> Specify a specific source of data (e.g.
another ensemble member) + !> Specify a specific source of data procedure :: set_data_source !> Append a dataset to a list for aggregation @@ -126,22 +126,34 @@ module database_client_interface procedure :: poll_list_length_gte !> Repeatedly check the length of the list until it less than or equal to the given size procedure :: poll_list_length_lte - !> Retrieve vector of datasetes from the list + !> Retrieve vector of datasets from the list procedure :: get_datasets_from_list ! Private procedures - procedure, private :: put_tensor_i8 !< Put 8-bit integer tensor into database - procedure, private :: put_tensor_i16 !< Put 16-bit integer tensor into database - procedure, private :: put_tensor_i32 !< Put 32-bit integer tensor into database - procedure, private :: put_tensor_i64 !< Put 64-bit tensor into database - procedure, private :: put_tensor_float !< Put 32-bit real tensor into database - procedure, private :: put_tensor_double !< Put 64-bit real tensor into database - procedure, private :: unpack_tensor_i8 !< Unpack a 8-bit integer tensor into memory - procedure, private :: unpack_tensor_i16 !< Unpack a 16-bit integer tensor into memory - procedure, private :: unpack_tensor_i32 !< Unpack a 32-bit integer tensor into memory - procedure, private :: unpack_tensor_i64 !< Unpack a 64-bit integer tensor into memory - procedure, private :: unpack_tensor_float !< Unpack a 32-bit real tensor into memory - procedure, private :: unpack_tensor_double !< Unpack a 64-bit real tensor into memory + !> Put 8-bit integer tensor into database + procedure, private :: put_tensor_i8 + !> Put 16-bit integer tensor into database + procedure, private :: put_tensor_i16 + !> Put 32-bit integer tensor into database + procedure, private :: put_tensor_i32 + !> Put 64-bit tensor into database + procedure, private :: put_tensor_i64 + !> Put 32-bit real tensor into database + procedure, private :: put_tensor_float + !> Put 64-bit real tensor into database + procedure, private :: put_tensor_double + !> Unpack a 8-bit integer tensor into memory + procedure, private :: unpack_tensor_i8 + !> Unpack a 16-bit integer tensor into memory + procedure, private :: unpack_tensor_i16 + !> Unpack a 32-bit integer tensor into memory + procedure, private :: unpack_tensor_i32 + !> Unpack a 64-bit integer tensor into memory + procedure, private :: unpack_tensor_i64 + !> Unpack a 32-bit real tensor into memory + procedure, private :: unpack_tensor_float + !> Unpack a 64-bit real tensor into memory + procedure, private :: unpack_tensor_double end type dbclient_type From 4d93e3a5db24018f16225f29fd3a239a7719a698 Mon Sep 17 00:00:00 2001 From: Andrew Shao Date: Fri, 5 Aug 2022 18:14:33 -0500 Subject: [PATCH 39/40] Refactor database_client_interface stub The database client stub used Fortran 2018 features, which do not adhere to the MOM6 code standard of allowing only features up to Fortran 2008. To fix this, separate interfaces for tensors with 1-4 dimensions replace the assumed-rank subroutines. This does not interfere with the SmartRedis library, as the public (overloaded) interface still retains the same API. Additionally, some methods that are likely unique to the SmartRedis client have been removed, to improve the chances of providing a general database client API that can be used by other implementations.
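To make the approach concrete, here is a minimal Fortran 2008 sketch of collecting rank-specific stubs under one generic type-bound name so callers keep a single put_tensor entry point. It is illustrative only and not part of this patch: the module, type, and procedure names are hypothetical, and only two of the rank/type combinations added in the diff below are shown.

    ! Sketch: rank-specific specific procedures behind one generic binding,
    ! replacing an assumed-rank (Fortran 2018) dummy argument.
    module generic_put_demo
      use iso_fortran_env, only : real32
      implicit none
      private
      public :: demo_client_type

      type :: demo_client_type
      contains
        procedure, private :: put_tensor_float_1d
        procedure, private :: put_tensor_float_2d
        !> One public name; the compiler selects the specific routine by rank
        generic, public :: put_tensor => put_tensor_float_1d, put_tensor_float_2d
      end type demo_client_type

    contains

      !> Stub that would store a 1d, 32-bit real tensor; returns an error code
      function put_tensor_float_1d(self, name, data, dims) result(code)
        class(demo_client_type), intent(in) :: self  !< Communication client
        character(len=*), intent(in) :: name         !< Key used in the database
        real(kind=real32), dimension(:), intent(in) :: data  !< Data to be sent
        integer, dimension(:), intent(in) :: dims    !< The length of each dimension
        integer :: code
        code = -1
      end function put_tensor_float_1d

      !> Stub that would store a 2d, 32-bit real tensor; returns an error code
      function put_tensor_float_2d(self, name, data, dims) result(code)
        class(demo_client_type), intent(in) :: self  !< Communication client
        character(len=*), intent(in) :: name         !< Key used in the database
        real(kind=real32), dimension(:,:), intent(in) :: data  !< Data to be sent
        integer, dimension(:), intent(in) :: dims    !< The length of each dimension
        integer :: code
        code = -1
      end function put_tensor_float_2d

    end module generic_put_demo

    program demo_generic_put
      use generic_put_demo, only : demo_client_type
      use iso_fortran_env, only : real32
      implicit none
      type(demo_client_type) :: client
      real(kind=real32) :: a(4), b(2,3)
      integer :: code
      a = 1.0_real32 ; b = 2.0_real32
      ! Both calls use the same generic name; the rank of the actual argument
      ! picks the specific binding, so no assumed-rank feature is needed.
      code = client%put_tensor("a_1d", a, shape(a))
      code = client%put_tensor("b_2d", b, shape(b))
    end program demo_generic_put

The same pattern extends to the 3d/4d and the double- and integer-valued variants introduced by the diff that follows.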
--- config_src/external/database_comms/README.md | 8 +- .../database_client_interface.F90 | 622 ++++++++---------- 2 files changed, 264 insertions(+), 366 deletions(-) diff --git a/config_src/external/database_comms/README.md b/config_src/external/database_comms/README.md index 53a06b0a4a..05f1f07259 100644 --- a/config_src/external/database_comms/README.md +++ b/config_src/external/database_comms/README.md @@ -18,6 +18,8 @@ code in the new implementation is part of `MOM_MEKE.F90`. control structure that are imported elsewhere within the primary MOM6 code. This includes: `dbcomms_CS_type`, `dbclient_type`, and `database_comms_init` -- `smartredis_client.F90` is a skeleton of the actual SmartRedis library - used to ensure that the interfaces to the library are maintained without - requiring MOM6 users to compile in the the full library +- `database_client_interface.F90` contains the methods for a communication client + to transfer data and/or commands between MOM6 and a remote database. This is + roughly based on the SmartRedis library, though only the methods that are most + likely to be used with MOM6 are retained. This is to ensure that the API can be + tested without requiring MOM6 users to compile in the the full library. diff --git a/config_src/external/database_comms/database_client_interface.F90 b/config_src/external/database_comms/database_client_interface.F90 index 8d4abacae9..9b57628921 100644 --- a/config_src/external/database_comms/database_client_interface.F90 +++ b/config_src/external/database_comms/database_client_interface.F90 @@ -17,12 +17,17 @@ module database_client_interface contains ! Public procedures - !> Puts a tensor into the database (overloaded) - generic :: put_tensor => put_tensor_i8, put_tensor_i16, put_tensor_i32, put_tensor_i64, & - put_tensor_float, put_tensor_double - !> Retrieve the tensor in the database into already allocated memory (overloaded) - generic :: unpack_tensor => unpack_tensor_i8, unpack_tensor_i16, unpack_tensor_i32, unpack_tensor_i64, & - unpack_tensor_float, unpack_tensor_double + !> Puts a tensor into the database for a variety of datatypes + generic :: put_tensor => put_tensor_float_1d, put_tensor_float_2d, put_tensor_float_3d, put_tensor_float_4d, & + put_tensor_double_1d, put_tensor_double_2d, put_tensor_double_3d, put_tensor_double_4d, & + put_tensor_int32_1d, put_tensor_int32_2d, put_tensor_int32_3d, put_tensor_int32_4d + !> Retrieve the tensor in the database into already allocated memory for a variety of datatypesm + generic :: unpack_tensor => unpack_tensor_float_1d, unpack_tensor_float_2d, & + unpack_tensor_float_3d, unpack_tensor_float_4d, & + unpack_tensor_double_1d, unpack_tensor_double_2d, & + unpack_tensor_double_3d, unpack_tensor_double_4d, & + unpack_tensor_int32_1d, unpack_tensor_int32_2d, & + unpack_tensor_int32_3d, unpack_tensor_int32_4d !> Decode a response code from an API function procedure :: SR_error_parser @@ -32,22 +37,6 @@ module database_client_interface procedure :: isinitialized !> Destructs a new instance of the communication client procedure :: destructor - !> Check the database for the existence of a specific model - procedure :: model_exists - !> Check the database for the existence of a specific tensor - procedure :: tensor_exists - !> Check the database for the existence of a specific key - procedure :: key_exists - !> Check the database for the existence of a specific dataset - procedure :: dataset_exists - !> Poll the database and return if the model exists - procedure :: poll_model - !> Poll the 
database and return if the tensor exists - procedure :: poll_tensor - !> Poll the database and return if the datasaet exists - procedure :: poll_dataset - !> Poll the database and return if the key exists - procedure :: poll_key !> Rename a tensor within the database procedure :: rename_tensor !> Delete a tensor from the database @@ -101,59 +90,55 @@ module database_client_interface !> Delete the dataset from the database procedure :: delete_dataset - !> If true, preprend the ensemble id for tensor-related keys - procedure :: use_tensor_ensemble_prefix - !> If true, preprend the ensemble id for model-related keys - procedure :: use_model_ensemble_prefix - !> If true, preprend the ensemble id for dataset list-related keys - procedure :: use_list_ensemble_prefix - !> Specify a specific source of data - procedure :: set_data_source - - !> Append a dataset to a list for aggregation - procedure :: append_to_list - !> Delete an aggregation list - procedure :: delete_list - !> Copy an aggregation list - procedure :: copy_list - !> Rename an existing aggregation list - procedure :: rename_list - !> Retrieve the number of datasets in the list - procedure :: get_list_length - !> Repeatedly check the length of the list until it is a given size - procedure :: poll_list_length - !> Repeatedly check the length of the list until it greater than or equal to the given size - procedure :: poll_list_length_gte - !> Repeatedly check the length of the list until it less than or equal to the given size - procedure :: poll_list_length_lte - !> Retrieve vector of datasets from the list - procedure :: get_datasets_from_list - ! Private procedures - !> Put 8-bit integer tensor into database - procedure, private :: put_tensor_i8 - !> Put 16-bit integer tensor into database - procedure, private :: put_tensor_i16 - !> Put 32-bit integer tensor into database - procedure, private :: put_tensor_i32 - !> Put 64-bit tensor into database - procedure, private :: put_tensor_i64 - !> Put 32-bit real tensor into database - procedure, private :: put_tensor_float - !> Put 64-bit real tensor into database - procedure, private :: put_tensor_double - !> Unpack a 8-bit integer tensor into memory - procedure, private :: unpack_tensor_i8 - !> Unpack a 16-bit integer tensor into memory - procedure, private :: unpack_tensor_i16 - !> Unpack a 32-bit integer tensor into memory - procedure, private :: unpack_tensor_i32 - !> Unpack a 64-bit integer tensor into memory - procedure, private :: unpack_tensor_i64 - !> Unpack a 32-bit real tensor into memory - procedure, private :: unpack_tensor_float - !> Unpack a 64-bit real tensor into memory - procedure, private :: unpack_tensor_double + !> Put a 1d, 32-bit real tensor into database + procedure, private :: put_tensor_float_1d + !> Put a 2d, 32-bit real tensor into database + procedure, private :: put_tensor_float_2d + !> Put a 3d, 32-bit real tensor into database + procedure, private :: put_tensor_float_3d + !> Put a 4d, 32-bit real tensor into database + procedure, private :: put_tensor_float_4d + !> Put a 1d, 64-bit real tensor into database + procedure, private :: put_tensor_double_1d + !> Put a 2d, 64-bit real tensor into database + procedure, private :: put_tensor_double_2d + !> Put a 3d, 64-bit real tensor into database + procedure, private :: put_tensor_double_3d + !> Put a 4d, 64-bit real tensor into database + procedure, private :: put_tensor_double_4d + !> Put a 1d, 32-bit integer tensor into database + procedure, private :: put_tensor_int32_1d + !> Put a 2d, 32-bit integer tensor 
into database + procedure, private :: put_tensor_int32_2d + !> Put a 3d, 32-bit integer tensor into database + procedure, private :: put_tensor_int32_3d + !> Put a 4d, 32-bit integer tensor into database + procedure, private :: put_tensor_int32_4d + !> Unpack a 1d, 32-bit real tensor from the database + procedure, private :: unpack_tensor_float_1d + !> Unpack a 2d, 32-bit real tensor from the database + procedure, private :: unpack_tensor_float_2d + !> Unpack a 3d, 32-bit real tensor from the database + procedure, private :: unpack_tensor_float_3d + !> Unpack a 4d, 32-bit real tensor from the database + procedure, private :: unpack_tensor_float_4d + !> Unpack a 1d, 64-bit real tensor from the database + procedure, private :: unpack_tensor_double_1d + !> Unpack a 2d, 64-bit real tensor from the database + procedure, private :: unpack_tensor_double_2d + !> Unpack a 3d, 64-bit real tensor from the database + procedure, private :: unpack_tensor_double_3d + !> Unpack a 4d, 64-bit real tensor from the database + procedure, private :: unpack_tensor_double_4d + !> Unpack a 1d, 32-bit integer tensor from the database + procedure, private :: unpack_tensor_int32_1d + !> Unpack a 2d, 32-bit integer tensor from the database + procedure, private :: unpack_tensor_int32_2d + !> Unpack a 3d, 32-bit integer tensor from the database + procedure, private :: unpack_tensor_int32_3d + !> Unpack a 4d, 32-bit integer tensor from the database + procedure, private :: unpack_tensor_int32_4d end type dbclient_type @@ -191,225 +176,269 @@ function destructor(self) destructor = -1 end function destructor - !> Check if the specified key exists in the database - function key_exists(self, key, exists) - class(dbclient_type), intent(in) :: self !< The client - character(len=*), intent(in) :: key !< The key to check - logical, intent(out) :: exists !< Receives whether the key exists - integer :: key_exists + !> Put a 32-bit real 1d tensor into the database + function put_tensor_float_1d(self, name, data, dims) result(code) + real(kind=real32), dimension(:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code + + code = -1 + end function put_tensor_float_1d + + !> Put a 32-bit real 2d tensor into the database + function put_tensor_float_2d(self, name, data, dims) result(code) + real(kind=real32), dimension(:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code + + code = -1 + end function put_tensor_float_2d + + !> Put a 32-bit real 3d tensor into the database + function put_tensor_float_3d(self, name, data, dims) result(code) + real(kind=real32), dimension(:,:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code + + code = -1 + end function put_tensor_float_3d + + !> Put a 32-bit real 4d tensor into the database + function put_tensor_float_4d(self, name, data, dims) result(code) + real(kind=real32), 
dimension(:,:,:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code + + code = -1 + end function put_tensor_float_4d + + !> Put a 64-bit real 1d tensor into the database + function put_tensor_double_1d(self, name, data, dims) result(code) + real(kind=real64), dimension(:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code - key_exists = -1 - end function key_exists + code = -1 + end function put_tensor_double_1d - !> Check if the specified model exists in the database - function model_exists(self, model_name, exists) result(code) - class(dbclient_type), intent(in) :: self !< The client - character(len=*), intent(in) :: model_name !< The model to check - logical, intent(out) :: exists !< Receives whether the model exists - integer :: code + !> Put a 64-bit real 2d tensor into the database + function put_tensor_double_2d(self, name, data, dims) result(code) + real(kind=real64), dimension(:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function model_exists + end function put_tensor_double_2d - !> Check if the specified tensor exists in the database - function tensor_exists(self, tensor_name, exists) result(code) - class(dbclient_type), intent(in) :: self !< The client - character(len=*), intent(in) :: tensor_name !< The tensor to check - logical, intent(out) :: exists !< Receives whether the model exists - integer :: code + !> Put a 64-bit real 3d tensor into the database + function put_tensor_double_3d(self, name, data, dims) result(code) + real(kind=real64), dimension(:,:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function tensor_exists + end function put_tensor_double_3d - !> Check if the specified dataset exists in the database - function dataset_exists(this, dataset_name, exists) result(code) - class(dbclient_type), intent(in) :: this !< The client - character(len=*), intent(in) :: dataset_name !< The dataset to check - logical, intent(out) :: exists !< Receives whether the model exists - integer :: code + !> Put a 64-bit real 4d tensor into the database + function put_tensor_double_4d(self, name, data, dims) result(code) + real(kind=real64), dimension(:,:,:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function dataset_exists + end function put_tensor_double_4d - !> Repeatedly poll the database until the 
tensor exists or the number of tries is exceeded - function poll_tensor(self, tensor_name, poll_frequency_ms, num_tries, exists) result(code) - class(dbclient_type), intent(in) :: self !< The client - character(len=*), intent(in) :: tensor_name !< name in the database to poll - integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) - integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical, intent(out) :: exists !< Receives whether the tensor exists - integer :: code + !> Put a 32-bit integer 1d tensor into the database + function put_tensor_int32_1d(self, name, data, dims) result(code) + integer(kind=int32), dimension(:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function poll_tensor + end function put_tensor_int32_1d - !> Repeatedly poll the database until the dataset exists or the number of tries is exceeded - function poll_dataset(self, dataset_name, poll_frequency_ms, num_tries, exists) - integer :: poll_dataset - class(dbclient_type), intent(in) :: self !< The client - character(len=*), intent(in) :: dataset_name !< Name in the database to poll - integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) - integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical, intent(out) :: exists !< Receives whether the tensor exists + !> Put a 32-bit integer 2d tensor into the database + function put_tensor_int32_2d(self, name, data, dims) result(code) + integer(kind=int32), dimension(:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code - poll_dataset = -1 - end function poll_dataset + code = -1 + end function put_tensor_int32_2d - !> Repeatedly poll the database until the model exists or the number of tries is exceeded - function poll_model(self, model_name, poll_frequency_ms, num_tries, exists) result(code) - class(dbclient_type), intent(in) :: self !< The client - character(len=*), intent(in) :: model_name !< Name in the database to poll - integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) - integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical, intent(out) :: exists !< Receives whether the model exists - integer :: code + !> Put a 32-bit integer 3d tensor into the database + function put_tensor_int32_3d(self, name, data, dims) result(code) + integer(kind=int32), dimension(:,:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function poll_model + end function put_tensor_int32_3d - !> Repeatedly poll the database until the key exists or the number of tries is exceeded - function poll_key(self, key, poll_frequency_ms, num_tries, exists) result(code) - class(dbclient_type), intent(in) :: self !< The 
client - character(len=*), intent(in) :: key !< Key in the database to poll - integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) - integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical, intent(out) :: exists !< Receives whether the key exists - integer :: code + !> Put a 32-bit integer 4d tensor into the database + function put_tensor_int32_4d(self, name, data, dims) result(code) + integer(kind=int32), dimension(:,:,:,:), intent(in) :: data !< Data to be sent + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function poll_key + end function put_tensor_int32_4d - !> Put a tensor whose Fortran type is the equivalent 'int8' C-type - function put_tensor_i8(self, name, data, dims) result(code) - integer(kind=int8), dimension(..), target, intent(in) :: data !< Data to be sent - class(dbclient_type), intent(in) :: self !< Fortran communication client - character(len=*), intent(in) :: name !< The unique name used to store in the database - integer, dimension(:), intent(in) :: dims !< The length of each dimension + !> Unpack a 32-bit real 1d tensor from the database + function unpack_tensor_float_1d(self, name, data, dims) result(code) + real(kind=real32), dimension(:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension integer :: code code = -1 - end function put_tensor_i8 + end function unpack_tensor_float_1d - !> Put a tensor whose Fortran type is the equivalent 'int16' C-type - function put_tensor_i16(self, name, data, dims) result(code) - integer(kind=int16), dimension(..), target, intent(in) :: data !< Data to be sent - class(dbclient_type), intent(in) :: self !< Fortran communication client - character(len=*), intent(in) :: name !< The unique name used to store in the database - integer, dimension(:), intent(in) :: dims !< The length of each dimension + !> Unpack a 32-bit real 2d tensor from the database + function unpack_tensor_float_2d(self, name, data, dims) result(code) + real(kind=real32), dimension(:,:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension integer :: code code = -1 - end function put_tensor_i16 + end function unpack_tensor_float_2d - !> Put a tensor whose Fortran type is the equivalent 'int32' C-type - function put_tensor_i32(self, name, data, dims) result(code) - integer(kind=int32), dimension(..), target, intent(in) :: data !< Data to be sent - class(dbclient_type), intent(in) :: self !< Fortran communication client - character(len=*), intent(in) :: name !< The unique name used to store in the database - integer, dimension(:), intent(in) :: dims !< The length of each dimension + !> Unpack a 32-bit real 3d tensor from the database + function unpack_tensor_float_3d(self, name, data, dims) result(code) + real(kind=real32), dimension(:,:,:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) 
:: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension integer :: code code = -1 - end function put_tensor_i32 + end function unpack_tensor_float_3d - !> Put a tensor whose Fortran type is the equivalent 'int64' C-type - function put_tensor_i64(self, name, data, dims) result(code) - integer(kind=int64), dimension(..), target, intent(in) :: data !< Data to be sent - class(dbclient_type), intent(in) :: self !< Fortran communication client - character(len=*), intent(in) :: name !< The unique name used to store in the database - integer, dimension(:), intent(in) :: dims !< The length of each dimension + !> Unpack a 32-bit real 4d tensor from the database + function unpack_tensor_float_4d(self, name, data, dims) result(code) + real(kind=real32), dimension(:,:,:,:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension integer :: code code = -1 - end function put_tensor_i64 + end function unpack_tensor_float_4d - !> Put a tensor whose Fortran type is the equivalent 'float' C-type - function put_tensor_float(self, name, data, dims) result(code) - real(kind=real32), dimension(..), target, intent(in) :: data !< Data to be sent - class(dbclient_type), intent(in) :: self !< Fortran communication client - character(len=*), intent(in) :: name !< The unique name used to store in the database - integer, dimension(:), intent(in) :: dims !< The length of each dimension + !> Unpack a 64-bit real 1d tensor from the database + function unpack_tensor_double_1d(self, name, data, dims) result(code) + real(kind=real64), dimension(:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension integer :: code code = -1 - end function put_tensor_float + end function unpack_tensor_double_1d - !> Put a tensor whose Fortran type is the equivalent 'double' C-type - function put_tensor_double(self, name, data, dims) result(code) - real(kind=real64), dimension(..), target, intent(in) :: data !< Data to be sent - class(dbclient_type), intent(in) :: self !< Fortran communication client - character(len=*), intent(in) :: name !< The unique name used to store in the database - integer, dimension(:), intent(in) :: dims !< The length of each dimension + !> Unpack a 64-bit real 2d tensor from the database + function unpack_tensor_double_2d(self, name, data, dims) result(code) + real(kind=real64), dimension(:,:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension integer :: code code = -1 - end function put_tensor_double + end function unpack_tensor_double_2d - !> Put a tensor whose Fortran type is the equivalent 'int8' C-type - function unpack_tensor_i8(self, name, result, dims) result(code) - integer(kind=int8), dimension(..), target, intent(out) :: result !< Data to be sent - class(dbclient_type), intent(in) 
:: self !< Pointer to the initialized client - character(len=*), intent(in) :: name !< The name to use to place the tensor - integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer :: code + !> Unpack a 64-bit real 3d tensor from the database + function unpack_tensor_double_3d(self, name, data, dims) result(code) + real(kind=real64), dimension(:,:,:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function unpack_tensor_i8 + end function unpack_tensor_double_3d - !> Put a tensor whose Fortran type is the equivalent 'int16' C-type - function unpack_tensor_i16(self, name, result, dims) result(code) - integer(kind=int16), dimension(..), target, intent(out) :: result !< Data to be sent - class(dbclient_type), intent(in) :: self !< Pointer to the initialized client - character(len=*), intent(in) :: name !< The name to use to place the tensor - integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer :: code + !> Unpack a 64-bit real 4d tensor from the database + function unpack_tensor_double_4d(self, name, data, dims) result(code) + real(kind=real64), dimension(:,:,:,:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function unpack_tensor_i16 + end function unpack_tensor_double_4d - !> Put a tensor whose Fortran type is the equivalent 'int32' C-type - function unpack_tensor_i32(self, name, result, dims) result(code) - integer(kind=int32), dimension(..), target, intent(out) :: result !< Data to be sent - class(dbclient_type), intent(in) :: self !< Pointer to the initialized client - character(len=*), intent(in) :: name !< The name to use to place the tensor - integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer :: code + !> Unpack a 32-bit integer 1d tensor from the database + function unpack_tensor_int32_1d(self, name, data, dims) result(code) + integer(kind=int32), dimension(:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function unpack_tensor_i32 + end function unpack_tensor_int32_1d - !> Put a tensor whose Fortran type is the equivalent 'int64' C-type - function unpack_tensor_i64(self, name, result, dims) result(code) - integer(kind=int64), dimension(..), target, intent(out) :: result !< Data to be sent - class(dbclient_type), intent(in) :: self !< Pointer to the initialized client - character(len=*), intent(in) :: name !< The name to use to place the tensor - integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer :: code + !> Unpack a 32-bit integer 2d tensor from the database + function unpack_tensor_int32_2d(self, name, data, dims) result(code) + integer(kind=int32), dimension(:,:), intent( out) :: data !< Data to be received 
+ class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function unpack_tensor_i64 + end function unpack_tensor_int32_2d - !> Put a tensor whose Fortran type is the equivalent 'float' C-type - function unpack_tensor_float(self, name, result, dims) result(code) - real(kind=real32), dimension(..), target, intent(out) :: result !< Data to be sent - class(dbclient_type), intent(in) :: self !< Pointer to the initialized client - character(len=*), intent(in) :: name !< The name to use to place the tensor - integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer :: code + !> Unpack a 32-bit integer 3d tensor from the database + function unpack_tensor_int32_3d(self, name, data, dims) result(code) + integer(kind=int32), dimension(:,:,:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function unpack_tensor_float + end function unpack_tensor_int32_3d - !> Put a tensor whose Fortran type is the equivalent 'double' C-type - function unpack_tensor_double(self, name, result, dims) result(code) - real(kind=real64), dimension(..), target, intent(out) :: result !< Data to be sent - class(dbclient_type), intent(in) :: self !< Pointer to the initialized client - character(len=*), intent(in) :: name !< The name to use to place the tensor - integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor - integer :: code + !> Unpack a 32-bit integer 4d tensor from the database + function unpack_tensor_int32_4d(self, name, data, dims) result(code) + integer(kind=int32), dimension(:,:,:,:), intent( out) :: data !< Data to be received + class(dbclient_type), intent(in) :: self !< Fortran communication client + character(len=*), intent(in) :: name !< The unique name used to store in the database + integer, dimension(:), intent(in) :: dims !< The length of each dimension + integer :: code code = -1 - end function unpack_tensor_double + end function unpack_tensor_int32_4d !> Move a tensor to a new name function rename_tensor(self, old_name, new_name) result(code) @@ -738,49 +767,6 @@ function delete_dataset(self, name) result(code) code = -1 end function delete_dataset - !> Set the data source (i.e. name prefix for get functions) - function set_data_source(self, source_id) result(code) - class(dbclient_type), intent(in) :: self !< An initialized communication client - character(len=*), intent(in) :: source_id !< The name prefix - integer :: code - - code = -1 - end function set_data_source - - !> Set whether names of model and script entities should be prefixed (e.g. in an ensemble) to form database names. - !! Prefixes will only be used if they were previously set through the environment variables SSKEYOUT and SSKEYIN. - !! Keys of entities created before client function is called will not be affected. By default, the client does not - !! prefix model and script names. 
- function use_model_ensemble_prefix(self, use_prefix) result(code) - class(dbclient_type), intent(in) :: self !< An initialized communication client - logical, intent(in) :: use_prefix !< The prefix setting - integer :: code - - code = -1 - end function use_model_ensemble_prefix - - - !> Set whether names of tensor and dataset entities should be prefixed (e.g. in an ensemble) to form database keys. - !! Prefixes will only be used if they were previously set through the environment variables SSKEYOUT and SSKEYIN. - !! Keys of entities created before client function is called will not be affected. By default, the client prefixes - !! tensor and dataset keys with the first prefix specified with the SSKEYIN and SSKEYOUT environment variables. - function use_tensor_ensemble_prefix(self, use_prefix) result(code) - class(dbclient_type), intent(in) :: self !< An initialized communication client - logical, intent(in) :: use_prefix !< The prefix setting - integer :: code - - code = -1 - end function use_tensor_ensemble_prefix - - !> Control whether aggregation lists are prefixed - function use_list_ensemble_prefix(self, use_prefix) result(code) - class(dbclient_type), intent(in) :: self !< An initialized communication client - logical, intent(in) :: use_prefix !< The prefix setting - integer :: code - - code = -1 - end function use_list_ensemble_prefix - !> Appends a dataset to the aggregation list When appending a dataset to an aggregation list, the list will !! automatically be created if it does not exist (i.e. this is the first entry in the list). Aggregation !! lists work by referencing the dataset by storing its key, so appending a dataset to an aggregation list @@ -824,95 +810,5 @@ function rename_list(self, src_name, dest_name) result(code) code = -1 end function rename_list - !> Get the length of the aggregation list - function get_list_length(self, list_name, result_length) result(code) - class(dbclient_type), intent(in ) :: self !< An initialized communication client - character(len=*), intent(in ) :: list_name !< Name of the dataset to get - integer, intent( out) :: result_length !< The length of the list - integer :: code - - code = -1 - end function get_list_length - - !> Get the length of the aggregation list - function poll_list_length(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code) - class(dbclient_type), intent(in ) :: self !< An initialized communication client - character(len=*), intent(in ) :: list_name !< Name of the dataset to get - integer, intent(in ) :: list_length !< The desired length of the list - integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) - integer, intent(in ) :: num_tries !< Number of times to poll the database before failing - logical, intent( out) :: poll_result !< True if the list is the requested length, - !! False if not after num_tries. 
- integer :: code
-
- code = -1
- end function poll_list_length
-
- !> Get the length of the aggregation list
- function poll_list_length_gte(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code)
- class(dbclient_type), intent(in ) :: self !< An initialized communication client
- character(len=*), intent(in ) :: list_name !< Name of the dataset to get
- integer, intent(in ) :: list_length !< The desired length of the list
- integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms)
- integer, intent(in ) :: num_tries !< Number of times to poll the database before failing
- logical, intent( out) :: poll_result !< True if the list is the requested length,
- !! False if not after num_tries.
- integer :: code
-
- code = -1
- end function poll_list_length_gte
-
- !> Get the length of the aggregation list
- function poll_list_length_lte(self, list_name, list_length, poll_frequency_ms, num_tries, poll_result) result(code)
- class(dbclient_type), intent(in) :: self !< An initialized communication client
- character(len=*), intent(in) :: list_name !< Name of the dataset to get
- integer, intent(in) :: list_length !< The desired length of the list
- integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms)
- integer, intent(in) :: num_tries !< Number of times to poll the database before failing
- logical, intent( out) :: poll_result !< True if the list is the requested length,
- !! False if not after num_tries.
-
- integer :: code
-
- code = -1
- end function poll_list_length_lte
-
- !> Get datasets from an aggregation list. Note that this will deallocate an existing list.
- !! NOTE: This can potentially be less performant than get_datasets_from_list_range due to an
- !! extra query to the database to get the list length. This is for now necessary because of
- !! difficulties in allocating memory for Fortran allocatables from within C.
- function get_datasets_from_list(self, list_name, datasets, num_datasets) result(code)
- class(dbclient_type), intent(in) :: self !< An initialized communication client
- character(len=*), intent(in) :: list_name !< Name of the dataset to get
- type(dataset_type), dimension(:), allocatable, intent( out) :: datasets !< The array of datasets included
- integer :: code
- !! in the list
- integer, intent(out) :: num_datasets !< The number of datasets returned
-
- code = -1
- end function get_datasets_from_list
-
- !> Get datasets from an aggregation list over a given range by index. Note that this will deallocate an existing list.
- function get_datasets_from_list_range(self, list_name, start_index, end_index, datasets) result(code)
- class(dbclient_type), intent(in) :: self !< An initialized communication client
- character(len=*), intent(in) :: list_name !< Name of the dataset to get
- integer, intent(in) :: start_index !< The starting index of the range (inclusive,
- !! starting at zero). Negative values are
- !! supported. A negative value indicates offsets
- !! starting at the end of the list. For example, -1 is
- !! the last element of the list.
- integer, intent(in) :: end_index !< The ending index of the range (inclusive,
- !! starting at zero). Negative values are
- !! supported. A negative value indicates offsets
- !! starting at the end of the list. For example, -1 is
- !! the last element of the list.
-
- type(dataset_type), dimension(:), allocatable, intent( out) :: datasets !< The array of datasets included
- integer :: code
- !! in the list
-
- code = -1
- end function get_datasets_from_list_range
-
 end module database_client_interface

From 1f1b8ad10aa66fa13bb746d002e3536dab715b60 Mon Sep 17 00:00:00 2001
From: Marshall Ward
Date: Wed, 10 Aug 2022 09:41:03 -0400
Subject: [PATCH 40/40] CI: Restore MacOS compiler variables

GitHub appears to have restored their compiler alias conventions on their
MacOS nodes. This patch restores those variables.
---
 .github/workflows/macos-regression.yml | 3 ++-
 .github/workflows/macos-stencil.yml | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/macos-regression.yml b/.github/workflows/macos-regression.yml
index e8f7469cca..d975854e0c 100644
--- a/.github/workflows/macos-regression.yml
+++ b/.github/workflows/macos-regression.yml
@@ -8,7 +8,8 @@ jobs:
 runs-on: macOS-latest

 env:
- FC: gfortran
+ CC: gcc-11
+ FC: gfortran-11

 defaults:
 run:
diff --git a/.github/workflows/macos-stencil.yml b/.github/workflows/macos-stencil.yml
index e0fcfeef8e..33436c221f 100644
--- a/.github/workflows/macos-stencil.yml
+++ b/.github/workflows/macos-stencil.yml
@@ -8,7 +8,8 @@ jobs:
 runs-on: macOS-latest

 env:
- FC: gfortran
+ CC: gcc-11
+ FC: gfortran-11

 defaults:
 run:
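A note on the stub interface touched in the database_client_interface hunks above: every
routine there simply returns -1, so any caller must treat the return code as meaning "no
database support is available". The sketch below is purely illustrative and is not part of
any patch in this series; the program name, the tensor name "example_field", and the array
shape are invented for the example, and it assumes dbclient_type and unpack_tensor_int32_3d
are public module entities that can be referenced directly (in the real module they may only
be reachable through type-bound generics, which lie outside the hunks shown here).

    ! Hypothetical caller of the stub client; every stub routine returns -1.
    program dbclient_stub_demo
      use, intrinsic :: iso_fortran_env, only : int32
      use database_client_interface, only : dbclient_type, unpack_tensor_int32_3d
      implicit none

      type(dbclient_type) :: client        ! the stubs never open a connection
      integer(kind=int32) :: field(8,8,4)  ! arbitrary example shape
      integer :: code

      ! Request a 3d int32 tensor by name; with the stubs this always fails.
      code = unpack_tensor_int32_3d(client, name="example_field", data=field, dims=shape(field))
      if (code /= 0) print *, "database client stubs are active; nothing was read"
    end program dbclient_stub_demo

Checking the returned code rather than assuming success keeps such a caller correct whether
the stubs above or a full client implementation is linked in.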