From 5bc0a343ed543bd773f37dedaaafcd555cb38e6f Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Tue, 17 May 2022 14:45:38 -0700
Subject: [PATCH 1/4] Clean up implementation following cgridDEV merge (#719)

* Clean up implementation following cgridDEV merge, based on comments in
  https://github.com/CICE-Consortium/CICE/pull/715

  Refactor the capping namelist: add a capping_method string namelist
  variable with options "hibler" and "kreyscher", equivalent to 1 and 0
  in the old capping namelist. The capping variable still exists and is
  set depending on the capping_method setting. Update ice_in and other
  set_nml options.

  Add a box2001 option for ice_data_conc. It does the same thing as p5
  but reduces confusion in box2001 usage. Updated set_nml options.

  Remove support for ice_data_type = smallblock, bigblock, easthalf.
  Remove a couple of tests that were using these.

  Capitalize the last letter of the variables dxt, dyt, dxe, dye, dxn,
  dyn, dxu, dyu -> dxT, dyT, dxE, dyE, dxN, dyN, dxU, dyU. This is the
  general direction we want to go; it was done now to make "dyn" look
  less like "dynamics".

  Modify some comment indentation in ice_init.F90

  Reorder grid_average_X2Y calls in ice_dyn_evp.F90 for readability

  Remove older code in ice_dyn_shared.F90 referencing dte, dte2T,
  tdamp2, etc.

  Clean up comments in ice_history_shared.F90, ice_dyn_evp.F90,
  ice_transport_driver.F90, ice_flux.F90, ice_forcing.F90, ice_grid.F90

  Remove commented-out code in ice_dyn_shared.F90 around the computation
  of iceumask

  Remove commented-out code in ice_forcing.F90 related to box2001_data_ocn

  Update documentation. The capping_method namelist table was reviewed
  and updated for consistency with the code (ice_ic change from
  "default" to "internal", dxt -> dxT, etc.)

* Update andacc namelist options; comment them out for now

* Switch capping_method options to max and sum

* Add alt07 tests for new dynamics namelist options
---
 cicecore/cicedynB/analysis/ice_history.F90 | 10 +-
 .../cicedynB/analysis/ice_history_shared.F90 | 2 +-
 cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 82 +++----
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 50 ++---
 cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 | 204 ++++++++---------
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 88 +++-----
 cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 174 +++++++--------
 .../dynamics/ice_transport_driver.F90 | 6 +-
 .../cicedynB/dynamics/ice_transport_remap.F90 | 10 +-
 cicecore/cicedynB/general/ice_flux.F90 | 24 +-
 cicecore/cicedynB/general/ice_forcing.F90 | 21 +-
 cicecore/cicedynB/general/ice_init.F90 | 208 ++++++++----------
 cicecore/cicedynB/infrastructure/ice_grid.F90 | 157 +++++++------
 .../io/io_netcdf/ice_history_write.F90 | 18 +-
 .../io/io_pio2/ice_history_write.F90 | 18 +-
 cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 | 10 +-
 cicecore/drivers/nuopc/dmi/cice_cap.info | 2 +-
 configuration/scripts/ice_in | 4 +-
 configuration/scripts/options/set_nml.alt07 | 6 +
 configuration/scripts/options/set_nml.box2001 | 2 +-
 configuration/scripts/options/set_nml.boxadv | 2 +-
 .../scripts/options/set_nml.boxnodyn | 2 +-
 .../scripts/options/set_nml.boxrestore | 2 +-
 configuration/scripts/options/set_nml.boxwall | 58 -----
 .../scripts/options/set_nml.boxwallp5 | 58 -----
 .../scripts/options/set_nml.dynanderson | 2 +-
 .../scripts/options/set_nml.dynpicard | 2 +-
 configuration/scripts/options/set_nml.gbox12 | 2 +-
 configuration/scripts/options/set_nml.gbox128 | 2 +-
 configuration/scripts/options/set_nml.gbox180 | 2 +-
 configuration/scripts/options/set_nml.gbox80 | 2 +-
 configuration/scripts/tests/base_suite.ts | 2 +
configuration/scripts/tests/gridsys_suite.ts | 6 - configuration/scripts/tests/io_suite.ts | 6 + configuration/scripts/tests/nothread_suite.ts | 2 + configuration/scripts/tests/omp_suite.ts | 6 + doc/source/cice_index.rst | 20 +- doc/source/science_guide/sg_dynamics.rst | 2 +- doc/source/user_guide/ug_case_settings.rst | 52 +++-- doc/source/user_guide/ug_implementation.rst | 20 +- 40 files changed, 606 insertions(+), 740 deletions(-) create mode 100644 configuration/scripts/options/set_nml.alt07 delete mode 100644 configuration/scripts/options/set_nml.boxwall delete mode 100644 configuration/scripts/options/set_nml.boxwallp5 diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index 94ee4f956..2fc57044e 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -2091,7 +2091,7 @@ subroutine accum_hist (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, nblocks use ice_domain_size, only: nfsd - use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu, grid_ice + use ice_grid, only: tmask, lmask_n, lmask_s, dxU, dyU, grid_ice use ice_calendar, only: new_year, write_history, & write_ic, timesecs, histfreq, nstreams, mmonth, & new_month @@ -2765,8 +2765,8 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & - worka(i,j) = (rhoi*p5*(vice(i+1,j,iblk)+vice(i,j,iblk))*dyu(i,j,iblk) & - + rhos*p5*(vsno(i+1,j,iblk)+vsno(i,j,iblk))*dyu(i,j,iblk)) & + worka(i,j) = (rhoi*p5*(vice(i+1,j,iblk)+vice(i,j,iblk))*dyU(i,j,iblk) & + + rhos*p5*(vsno(i+1,j,iblk)+vsno(i,j,iblk))*dyU(i,j,iblk)) & * p5*(uvel(i,j-1,iblk)+uvel(i,j,iblk)) enddo enddo @@ -2778,8 +2778,8 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & - worka(i,j) = (rhoi*p5*(vice(i,j+1,iblk)+vice(i,j,iblk))*dxu(i,j,iblk) & - + rhos*p5*(vsno(i,j+1,iblk)+vsno(i,j,iblk))*dxu(i,j,iblk)) & + worka(i,j) = (rhoi*p5*(vice(i,j+1,iblk)+vice(i,j,iblk))*dxU(i,j,iblk) & + + rhos*p5*(vsno(i,j+1,iblk)+vsno(i,j,iblk))*dxU(i,j,iblk)) & * p5*(vvel(i-1,j,iblk)+vvel(i,j,iblk)) enddo enddo diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index aea1d4bcf..66c4401c7 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -402,7 +402,7 @@ module ice_history_shared f_Tsfc, f_aice , & f_uvel, f_vvel , & f_icespd, f_icedir , & -! For now, don't allow the users to modify the CD grid quantities. +! For now, C and CD grid quantities are controlled by the generic (originally B-grid) namelist flag ! f_uvelE, f_vvelE , & ! f_icespdE, f_icedirE , & ! 
f_uvelN, f_vvelN , & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 2b356ace0..7a660a031 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -143,7 +143,7 @@ subroutine eap (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & + use ice_grid, only: tmask, umask, dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & @@ -453,7 +453,7 @@ subroutine eap (dt) indxti (:,iblk), indxtj (:,iblk), & arlx1i, denom1, & uvel (:,:,iblk), vvel (:,:,iblk), & - dxt (:,:,iblk), dyt (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & @@ -1165,7 +1165,7 @@ subroutine stress_eap (nx_block, ny_block, & indxti, indxtj, & arlx1i, denom1, & uvel, vvel, & - dxt, dyt, & + dxT, dyT, & dxhy, dyhx, & cxp, cyp, & cxm, cym, & @@ -1208,8 +1208,8 @@ subroutine stress_eap (nx_block, ny_block, & strength , & ! ice strength (N/m) uvel , & ! x-component of velocity (m/s) vvel , & ! y-component of velocity (m/s) - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) dxhy , & ! 0.5*(HTE - HTW) dyhx , & ! 0.5*(HTN - HTS) cyp , & ! 1.5*HTE - 0.5*HTW @@ -1291,34 +1291,34 @@ subroutine stress_eap (nx_block, ny_block, & !----------------------------------------------------------------- ! divergence = e_11 + e_22 - divune = cyp(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & - + cxp(i,j)*vvel(i ,j ) - dxt(i,j)*vvel(i ,j-1) - divunw = cym(i,j)*uvel(i-1,j ) + dyt(i,j)*uvel(i ,j ) & - + cxp(i,j)*vvel(i-1,j ) - dxt(i,j)*vvel(i-1,j-1) - divusw = cym(i,j)*uvel(i-1,j-1) + dyt(i,j)*uvel(i ,j-1) & - + cxm(i,j)*vvel(i-1,j-1) + dxt(i,j)*vvel(i-1,j ) - divuse = cyp(i,j)*uvel(i ,j-1) - dyt(i,j)*uvel(i-1,j-1) & - + cxm(i,j)*vvel(i ,j-1) + dxt(i,j)*vvel(i ,j ) + divune = cyp(i,j)*uvel(i ,j ) - dyT(i,j)*uvel(i-1,j ) & + + cxp(i,j)*vvel(i ,j ) - dxT(i,j)*vvel(i ,j-1) + divunw = cym(i,j)*uvel(i-1,j ) + dyT(i,j)*uvel(i ,j ) & + + cxp(i,j)*vvel(i-1,j ) - dxT(i,j)*vvel(i-1,j-1) + divusw = cym(i,j)*uvel(i-1,j-1) + dyT(i,j)*uvel(i ,j-1) & + + cxm(i,j)*vvel(i-1,j-1) + dxT(i,j)*vvel(i-1,j ) + divuse = cyp(i,j)*uvel(i ,j-1) - dyT(i,j)*uvel(i-1,j-1) & + + cxm(i,j)*vvel(i ,j-1) + dxT(i,j)*vvel(i ,j ) ! 
tension strain rate = e_11 - e_22 - tensionne = -cym(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & - + cxm(i,j)*vvel(i ,j ) + dxt(i,j)*vvel(i ,j-1) - tensionnw = -cyp(i,j)*uvel(i-1,j ) + dyt(i,j)*uvel(i ,j ) & - + cxm(i,j)*vvel(i-1,j ) + dxt(i,j)*vvel(i-1,j-1) - tensionsw = -cyp(i,j)*uvel(i-1,j-1) + dyt(i,j)*uvel(i ,j-1) & - + cxp(i,j)*vvel(i-1,j-1) - dxt(i,j)*vvel(i-1,j ) - tensionse = -cym(i,j)*uvel(i ,j-1) - dyt(i,j)*uvel(i-1,j-1) & - + cxp(i,j)*vvel(i ,j-1) - dxt(i,j)*vvel(i ,j ) + tensionne = -cym(i,j)*uvel(i ,j ) - dyT(i,j)*uvel(i-1,j ) & + + cxm(i,j)*vvel(i ,j ) + dxT(i,j)*vvel(i ,j-1) + tensionnw = -cyp(i,j)*uvel(i-1,j ) + dyT(i,j)*uvel(i ,j ) & + + cxm(i,j)*vvel(i-1,j ) + dxT(i,j)*vvel(i-1,j-1) + tensionsw = -cyp(i,j)*uvel(i-1,j-1) + dyT(i,j)*uvel(i ,j-1) & + + cxp(i,j)*vvel(i-1,j-1) - dxT(i,j)*vvel(i-1,j ) + tensionse = -cym(i,j)*uvel(i ,j-1) - dyT(i,j)*uvel(i-1,j-1) & + + cxp(i,j)*vvel(i ,j-1) - dxT(i,j)*vvel(i ,j ) ! shearing strain rate = 2*e_12 - shearne = -cym(i,j)*vvel(i ,j ) - dyt(i,j)*vvel(i-1,j ) & - - cxm(i,j)*uvel(i ,j ) - dxt(i,j)*uvel(i ,j-1) - shearnw = -cyp(i,j)*vvel(i-1,j ) + dyt(i,j)*vvel(i ,j ) & - - cxm(i,j)*uvel(i-1,j ) - dxt(i,j)*uvel(i-1,j-1) - shearsw = -cyp(i,j)*vvel(i-1,j-1) + dyt(i,j)*vvel(i ,j-1) & - - cxp(i,j)*uvel(i-1,j-1) + dxt(i,j)*uvel(i-1,j ) - shearse = -cym(i,j)*vvel(i ,j-1) - dyt(i,j)*vvel(i-1,j-1) & - - cxp(i,j)*uvel(i ,j-1) + dxt(i,j)*uvel(i ,j ) + shearne = -cym(i,j)*vvel(i ,j ) - dyT(i,j)*vvel(i-1,j ) & + - cxm(i,j)*uvel(i ,j ) - dxT(i,j)*uvel(i ,j-1) + shearnw = -cyp(i,j)*vvel(i-1,j ) + dyT(i,j)*vvel(i ,j ) & + - cxm(i,j)*uvel(i-1,j ) - dxT(i,j)*uvel(i-1,j-1) + shearsw = -cyp(i,j)*vvel(i-1,j-1) + dyT(i,j)*vvel(i ,j-1) & + - cxp(i,j)*uvel(i-1,j-1) + dxT(i,j)*uvel(i-1,j ) + shearse = -cym(i,j)*vvel(i ,j-1) - dyT(i,j)*vvel(i-1,j-1) & + - cxp(i,j)*uvel(i ,j-1) + dxT(i,j)*uvel(i ,j ) !----------------------------------------------------------------- ! Stress updated depending on strain rate and structure tensor @@ -1499,17 +1499,17 @@ subroutine stress_eap (nx_block, ny_block, & csig12se = p222*stress12_4(i,j) + ssig121 & + p055*stress12_2(i,j) - str12ew = p5*dxt(i,j)*(p333*ssig12e + p166*ssig12w) - str12we = p5*dxt(i,j)*(p333*ssig12w + p166*ssig12e) - str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) - str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) + str12ew = p5*dxT(i,j)*(p333*ssig12e + p166*ssig12w) + str12we = p5*dxT(i,j)*(p333*ssig12w + p166*ssig12e) + str12ns = p5*dyT(i,j)*(p333*ssig12n + p166*ssig12s) + str12sn = p5*dyT(i,j)*(p333*ssig12s + p166*ssig12n) !----------------------------------------------------------------- ! for dF/dx (u momentum) !----------------------------------------------------------------- - strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) + strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) + strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) ! northeast (i,j) strtmp(i,j,1) = -strp_tmp - strm_tmp - str12ew & @@ -1519,8 +1519,8 @@ subroutine stress_eap (nx_block, ny_block, & strtmp(i,j,2) = strp_tmp + strm_tmp - str12we & + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw - strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) - strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) + strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) + strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn) ! southeast (i,j+1) strtmp(i,j,3) = -strp_tmp - strm_tmp + str12ew & @@ -1534,8 +1534,8 @@ subroutine stress_eap (nx_block, ny_block, & ! 
for dF/dy (v momentum) !----------------------------------------------------------------- - strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) - strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) + strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) + strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw) ! northeast (i,j) strtmp(i,j,5) = -strp_tmp + strm_tmp - str12ns & @@ -1545,8 +1545,8 @@ subroutine stress_eap (nx_block, ny_block, & strtmp(i,j,6) = strp_tmp - strm_tmp - str12sn & - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se - strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) - strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) + strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) + strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme) ! northwest (i+1,j) strtmp(i,j,7) = -strp_tmp + strm_tmp + str12ns & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index f18e60802..3c417e8b8 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -99,7 +99,7 @@ subroutine evp (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_grid, only: hm, tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & - dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, & + dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, grid_average_X2Y, tarea, uarea, & @@ -185,9 +185,9 @@ subroutine evp (dt) emassdti ! mass of E-cell/dte (kg/m^2 s) real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) , & ! bundled fields size 2 - fld3(:,:,:,:) , & ! bundled fields size 3 - fld4(:,:,:,:) ! bundled fields size 4 + fld2(:,:,:,:) , & ! 2 bundled fields + fld3(:,:,:,:) , & ! 3 bundled fields + fld4(:,:,:,:) ! 4 bundled fields real (kind=dbl_kind), allocatable :: & strengthU(:,:,:), & ! strength averaged to U points @@ -326,12 +326,12 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then call grid_average_X2Y('F', tmass , 'T' , emass, 'E') call grid_average_X2Y('F', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE, 'E') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE, 'E') call grid_average_X2Y('F', tmass , 'T' , nmass, 'N') call grid_average_X2Y('F', aice_init, 'T' , ain , 'N') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnN, 'N') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnN, 'N') - call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE, 'E') - call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE, 'E') endif !---------------------------------------------------------------- ! 
Set wind stress to values supplied via NEMO or other forcing @@ -722,7 +722,7 @@ subroutine evp (dt) icetmask, iceumask, & cdn_ocn,aiu,uocnU,vocnU,forcex,forcey,Tbu, & umassdti,fm,uarear,tarear,strintx,strinty,uvel_init,vvel_init,& - strength,uvel,vvel,dxt,dyt, & + strength,uvel,vvel,dxT,dyT, & stressp_1 ,stressp_2, stressp_3, stressp_4, & stressm_1 ,stressm_2, stressm_3, stressm_4, & stress12_1,stress12_2,stress12_3,stress12_4 ) @@ -754,7 +754,7 @@ subroutine evp (dt) icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - dxt (:,:,iblk), dyt (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & @@ -776,7 +776,7 @@ subroutine evp (dt) icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - dxt (:,:,iblk), dyt (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & tarear (:,:,iblk), & @@ -1335,7 +1335,7 @@ subroutine stress (nx_block, ny_block, & icellt, & indxti, indxtj, & uvel, vvel, & - dxt, dyt, & + dxT, dyT, & dxhy, dyhx, & cxp, cyp, & cxm, cym, & @@ -1363,8 +1363,8 @@ subroutine stress (nx_block, ny_block, & strength , & ! ice strength (N/m) uvel , & ! x-component of velocity (m/s) vvel , & ! y-component of velocity (m/s) - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) dxhy , & ! 0.5*(HTE - HTW) dyhx , & ! 0.5*(HTN - HTS) cyp , & ! 1.5*HTE - 0.5*HTW @@ -1425,7 +1425,7 @@ subroutine stress (nx_block, ny_block, & call strain_rates (nx_block, ny_block, & i, j, & uvel, vvel, & - dxt, dyt, & + dxT, dyT, & cxp, cyp, & cxm, cym, & divune, divunw, & @@ -1561,16 +1561,16 @@ subroutine stress (nx_block, ny_block, & csig12se = p222*stress12_4(i,j) + ssig121 & + p055*stress12_2(i,j) - str12ew = p5*dxt(i,j)*(p333*ssig12e + p166*ssig12w) - str12we = p5*dxt(i,j)*(p333*ssig12w + p166*ssig12e) - str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) - str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) + str12ew = p5*dxT(i,j)*(p333*ssig12e + p166*ssig12w) + str12we = p5*dxT(i,j)*(p333*ssig12w + p166*ssig12e) + str12ns = p5*dyT(i,j)*(p333*ssig12n + p166*ssig12s) + str12sn = p5*dyT(i,j)*(p333*ssig12s + p166*ssig12n) !----------------------------------------------------------------- ! for dF/dx (u momentum) !----------------------------------------------------------------- - strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) + strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) + strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) ! northeast (i,j) str(i,j,1) = -strp_tmp - strm_tmp - str12ew & @@ -1580,8 +1580,8 @@ subroutine stress (nx_block, ny_block, & str(i,j,2) = strp_tmp + strm_tmp - str12we & + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw - strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) - strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) + strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) + strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn) ! southeast (i,j+1) str(i,j,3) = -strp_tmp - strm_tmp + str12ew & @@ -1594,8 +1594,8 @@ subroutine stress (nx_block, ny_block, & !----------------------------------------------------------------- ! 
for dF/dy (v momentum) !----------------------------------------------------------------- - strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) - strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) + strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) + strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw) ! northeast (i,j) str(i,j,5) = -strp_tmp + strm_tmp - str12ns & @@ -1605,8 +1605,8 @@ subroutine stress (nx_block, ny_block, & str(i,j,6) = strp_tmp - strm_tmp - str12sn & - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se - strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) - strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) + strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) + strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme) ! northwest (i+1,j) str(i,j,7) = -strp_tmp + strm_tmp + str12ns & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 index 1ca41898d..2f5389d06 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 @@ -31,7 +31,7 @@ module ice_dyn_evp_1d real(kind=dbl_kind), dimension(:), allocatable :: cdn_ocn, aiu, & uocn, vocn, forcex, forcey, Tbu, tarear, umassdti, fm, uarear, & strintx, strinty, uvel_init, vvel_init, strength, uvel, vvel, & - dxt, dyt, stressp_1, stressp_2, stressp_3, stressp_4, stressm_1, & + dxT, dyT, stressp_1, stressp_2, stressp_3, stressp_4, stressm_1, & stressm_2, stressm_3, stressm_4, stress12_1, stress12_2, & stress12_3, stress12_4, divu, rdg_conv, rdg_shear, shear, taubx, & tauby, str1, str2, str3, str4, str5, str6, str7, str8, HTE, HTN, & @@ -123,8 +123,8 @@ end subroutine domp_get_domain !======================================================================= - subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & - dyt, hte, htn, htem1, htnm1, strength, stressp_1, stressp_2, & + subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxT, & + dyT, hte, htn, htem1, htnm1, strength, stressp_1, stressp_2, & stressp_3, stressp_4, stressm_1, stressm_2, stressm_3, & stressm_4, stress12_1, stress12_2, stress12_3, stress12_4, str1, & str2, str3, str4, str5, str6, str7, str8, skiptcell) @@ -141,7 +141,7 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & integer(kind=int_kind), dimension(:), intent(in), contiguous :: & ee, ne, se real(kind=dbl_kind), dimension(:), intent(in), contiguous :: & - strength, uvel, vvel, dxt, dyt, hte, htn, htem1, htnm1 + strength, uvel, vvel, dxT, dyT, hte, htn, htem1, htnm1 logical(kind=log_kind), intent(in), dimension(:) :: skiptcell real(kind=dbl_kind), dimension(:), intent(inout), contiguous :: & stressp_1, stressp_2, stressp_3, stressp_4, stressm_1, & @@ -170,7 +170,7 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & #ifdef _OPENACC !$acc parallel & - !$acc present(ee, ne, se, strength, uvel, vvel, dxt, dyt, hte, & + !$acc present(ee, ne, se, strength, uvel, vvel, dxT, dyT, hte, & !$acc htn, htem1, htnm1, str1, str2, str3, str4, str5, str6, & !$acc str7, str8, stressp_1, stressp_2, stressp_3, stressp_4, & !$acc stressm_1, stressm_2, stressm_3, stressm_4, stress12_1, & @@ -184,7 +184,7 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & if (skiptcell(iw)) cycle - tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of DminTarea. Otherwize not binary identical + tmparea = dxT(iw) * dyT(iw) ! necessary to split calc of DminTarea. 
Otherwize not binary identical DminTarea = deltaminEVP * tmparea dxhy = p5 * (hte(iw) - htem1(iw)) dyhx = p5 * (htn(iw) - htnm1(iw)) @@ -206,34 +206,34 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & tmp_vvel_se = vvel(se(iw)) tmp_vvel_ne = vvel(ne(iw)) ! divergence = e_11 + e_22 - divune = cyp * uvel(iw) - dyt(iw) * tmp_uvel_ee & - + cxp * vvel(iw) - dxt(iw) * tmp_vvel_se - divunw = cym * tmp_uvel_ee + dyt(iw) * uvel(iw) & - + cxp * tmp_vvel_ee - dxt(iw) * tmp_vvel_ne - divusw = cym * tmp_uvel_ne + dyt(iw) * tmp_uvel_se & - + cxm * tmp_vvel_ne + dxt(iw) * tmp_vvel_ee - divuse = cyp * tmp_uvel_se - dyt(iw) * tmp_uvel_ne & - + cxm * tmp_vvel_se + dxt(iw) * vvel(iw) + divune = cyp * uvel(iw) - dyT(iw) * tmp_uvel_ee & + + cxp * vvel(iw) - dxT(iw) * tmp_vvel_se + divunw = cym * tmp_uvel_ee + dyT(iw) * uvel(iw) & + + cxp * tmp_vvel_ee - dxT(iw) * tmp_vvel_ne + divusw = cym * tmp_uvel_ne + dyT(iw) * tmp_uvel_se & + + cxm * tmp_vvel_ne + dxT(iw) * tmp_vvel_ee + divuse = cyp * tmp_uvel_se - dyT(iw) * tmp_uvel_ne & + + cxm * tmp_vvel_se + dxT(iw) * vvel(iw) ! tension strain rate = e_11 - e_22 - tensionne = -cym * uvel(iw) - dyt(iw) * tmp_uvel_ee & - + cxm * vvel(iw) + dxt(iw) * tmp_vvel_se - tensionnw = -cyp * tmp_uvel_ee + dyt(iw) * uvel(iw) & - + cxm * tmp_vvel_ee + dxt(iw) * tmp_vvel_ne - tensionsw = -cyp * tmp_uvel_ne + dyt(iw) * tmp_uvel_se & - + cxp * tmp_vvel_ne - dxt(iw) * tmp_vvel_ee - tensionse = -cym * tmp_uvel_se - dyt(iw) * tmp_uvel_ne & - + cxp * tmp_vvel_se - dxt(iw) * vvel(iw) + tensionne = -cym * uvel(iw) - dyT(iw) * tmp_uvel_ee & + + cxm * vvel(iw) + dxT(iw) * tmp_vvel_se + tensionnw = -cyp * tmp_uvel_ee + dyT(iw) * uvel(iw) & + + cxm * tmp_vvel_ee + dxT(iw) * tmp_vvel_ne + tensionsw = -cyp * tmp_uvel_ne + dyT(iw) * tmp_uvel_se & + + cxp * tmp_vvel_ne - dxT(iw) * tmp_vvel_ee + tensionse = -cym * tmp_uvel_se - dyT(iw) * tmp_uvel_ne & + + cxp * tmp_vvel_se - dxT(iw) * vvel(iw) ! shearing strain rate = 2 * e_12 - shearne = -cym * vvel(iw) - dyt(iw) * tmp_vvel_ee & - - cxm * uvel(iw) - dxt(iw) * tmp_uvel_se - shearnw = -cyp * tmp_vvel_ee + dyt(iw) * vvel(iw) & - - cxm * tmp_uvel_ee - dxt(iw) * tmp_uvel_ne - shearsw = -cyp * tmp_vvel_ne + dyt(iw) * tmp_vvel_se & - - cxp * tmp_uvel_ne + dxt(iw) * tmp_uvel_ee - shearse = -cym * tmp_vvel_se - dyt(iw) * tmp_vvel_ne & - - cxp * tmp_uvel_se + dxt(iw) * uvel(iw) + shearne = -cym * vvel(iw) - dyT(iw) * tmp_vvel_ee & + - cxm * uvel(iw) - dxT(iw) * tmp_uvel_se + shearnw = -cyp * tmp_vvel_ee + dyT(iw) * vvel(iw) & + - cxm * tmp_uvel_ee - dxT(iw) * tmp_uvel_ne + shearsw = -cyp * tmp_vvel_ne + dyT(iw) * tmp_vvel_se & + - cxp * tmp_uvel_ne + dxT(iw) * tmp_uvel_ee + shearse = -cym * tmp_vvel_se - dyT(iw) * tmp_vvel_ne & + - cxp * tmp_uvel_se + dxT(iw) * uvel(iw) ! 
Delta (in the denominator of zeta and eta) Deltane = sqrt(divune**2 + ecci * (tensionne**2 + shearne**2)) @@ -326,17 +326,17 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & csig12sw = p222 * stress12_3(iw) + ssig122 + p055 * stress12_1(iw) csig12se = p222 * stress12_4(iw) + ssig121 + p055 * stress12_2(iw) - str12ew = p5 * dxt(iw) * (p333 * ssig12e + p166 * ssig12w) - str12we = p5 * dxt(iw) * (p333 * ssig12w + p166 * ssig12e) - str12ns = p5 * dyt(iw) * (p333 * ssig12n + p166 * ssig12s) - str12sn = p5 * dyt(iw) * (p333 * ssig12s + p166 * ssig12n) + str12ew = p5 * dxT(iw) * (p333 * ssig12e + p166 * ssig12w) + str12we = p5 * dxT(iw) * (p333 * ssig12w + p166 * ssig12e) + str12ns = p5 * dyT(iw) * (p333 * ssig12n + p166 * ssig12s) + str12sn = p5 * dyT(iw) * (p333 * ssig12s + p166 * ssig12n) !-------------------------------------------------------------- ! for dF/dx (u momentum) !-------------------------------------------------------------- - strp_tmp = p25 * dyt(iw) * (p333 * ssigpn + p166 * ssigps) - strm_tmp = p25 * dyt(iw) * (p333 * ssigmn + p166 * ssigms) + strp_tmp = p25 * dyT(iw) * (p333 * ssigpn + p166 * ssigps) + strm_tmp = p25 * dyT(iw) * (p333 * ssigmn + p166 * ssigms) ! northeast (i,j) str1(iw) = -strp_tmp - strm_tmp - str12ew & @@ -346,8 +346,8 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & str2(iw) = strp_tmp + strm_tmp - str12we & + dxhy * (-csigpnw + csigmnw) + dyhx * csig12nw - strp_tmp = p25 * dyt(iw) * (p333 * ssigps + p166 * ssigpn) - strm_tmp = p25 * dyt(iw) * (p333 * ssigms + p166 * ssigmn) + strp_tmp = p25 * dyT(iw) * (p333 * ssigps + p166 * ssigpn) + strm_tmp = p25 * dyT(iw) * (p333 * ssigms + p166 * ssigmn) ! southeast (i,j+1) str3(iw) = -strp_tmp - strm_tmp + str12ew & @@ -361,8 +361,8 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & ! for dF/dy (v momentum) !-------------------------------------------------------------- - strp_tmp = p25 * dxt(iw) * (p333 * ssigpe + p166 * ssigpw) - strm_tmp = p25 * dxt(iw) * (p333 * ssigme + p166 * ssigmw) + strp_tmp = p25 * dxT(iw) * (p333 * ssigpe + p166 * ssigpw) + strm_tmp = p25 * dxT(iw) * (p333 * ssigme + p166 * ssigmw) ! northeast (i,j) str5(iw) = -strp_tmp + strm_tmp - str12ns & @@ -372,8 +372,8 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & str6(iw) = strp_tmp - strm_tmp - str12sn & - dyhx * (csigpse + csigmse) + dxhy * csig12se - strp_tmp = p25 * dxt(iw) * (p333 * ssigpw + p166 * ssigpe) - strm_tmp = p25 * dxt(iw) * (p333 * ssigmw + p166 * ssigme) + strp_tmp = p25 * dxT(iw) * (p333 * ssigpw + p166 * ssigpe) + strm_tmp = p25 * dxT(iw) * (p333 * ssigmw + p166 * ssigme) ! 
northwest (i+1,j) str7(iw) = -strp_tmp + strm_tmp + str12ns & @@ -392,8 +392,8 @@ end subroutine stress_iter !======================================================================= - subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & - dyt, hte, htn, htem1, htnm1, strength, stressp_1, stressp_2, & + subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxT, & + dyT, hte, htn, htem1, htnm1, strength, stressp_1, stressp_2, & stressp_3, stressp_4, stressm_1, stressm_2, stressm_3, & stressm_4, stress12_1, stress12_2, stress12_3, stress12_4, str1, & str2, str3, str4, str5, str6, str7, str8, skiptcell, tarear, divu, & @@ -411,7 +411,7 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & integer(kind=int_kind), dimension(:), intent(in), contiguous :: & ee, ne, se real(kind=dbl_kind), dimension(:), intent(in), contiguous :: & - strength, uvel, vvel, dxt, dyt, hte, htn, htem1, htnm1, tarear + strength, uvel, vvel, dxT, dyT, hte, htn, htem1, htnm1, tarear logical(kind=log_kind), intent(in), dimension(:) :: skiptcell real(kind=dbl_kind), dimension(:), intent(inout), contiguous :: & stressp_1, stressp_2, stressp_3, stressp_4, stressm_1, & @@ -441,7 +441,7 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & #ifdef _OPENACC !$acc parallel & - !$acc present(ee, ne, se, strength, uvel, vvel, dxt, dyt, hte, & + !$acc present(ee, ne, se, strength, uvel, vvel, dxT, dyT, hte, & !$acc htn, htem1, htnm1, str1, str2, str3, str4, str5, str6, & !$acc str7, str8, stressp_1, stressp_2, stressp_3, stressp_4, & !$acc stressm_1, stressm_2, stressm_3, stressm_4, stress12_1, & @@ -456,7 +456,7 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & if (skiptcell(iw)) cycle - tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of DminTarea. Otherwize not binary identical + tmparea = dxT(iw) * dyT(iw) ! necessary to split calc of DminTarea. Otherwize not binary identical DminTarea = deltaminEVP * tmparea dxhy = p5 * (hte(iw) - htem1(iw)) dyhx = p5 * (htn(iw) - htnm1(iw)) @@ -479,34 +479,34 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & tmp_vvel_ne = vvel(ne(iw)) ! divergence = e_11 + e_22 - divune = cyp * uvel(iw) - dyt(iw) * tmp_uvel_ee & - + cxp * vvel(iw) - dxt(iw) * tmp_vvel_se - divunw = cym * tmp_uvel_ee + dyt(iw) * uvel(iw) & - + cxp * tmp_vvel_ee - dxt(iw) * tmp_vvel_ne - divusw = cym * tmp_uvel_ne + dyt(iw) * tmp_uvel_se & - + cxm * tmp_vvel_ne + dxt(iw) * tmp_vvel_ee - divuse = cyp * tmp_uvel_se - dyt(iw) * tmp_uvel_ne & - + cxm * tmp_vvel_se + dxt(iw) * vvel(iw) + divune = cyp * uvel(iw) - dyT(iw) * tmp_uvel_ee & + + cxp * vvel(iw) - dxT(iw) * tmp_vvel_se + divunw = cym * tmp_uvel_ee + dyT(iw) * uvel(iw) & + + cxp * tmp_vvel_ee - dxT(iw) * tmp_vvel_ne + divusw = cym * tmp_uvel_ne + dyT(iw) * tmp_uvel_se & + + cxm * tmp_vvel_ne + dxT(iw) * tmp_vvel_ee + divuse = cyp * tmp_uvel_se - dyT(iw) * tmp_uvel_ne & + + cxm * tmp_vvel_se + dxT(iw) * vvel(iw) ! 
tension strain rate = e_11 - e_22 - tensionne = -cym * uvel(iw) - dyt(iw) * tmp_uvel_ee & - + cxm * vvel(iw) + dxt(iw) * tmp_vvel_se - tensionnw = -cyp * tmp_uvel_ee + dyt(iw) * uvel(iw) & - + cxm * tmp_vvel_ee + dxt(iw) * tmp_vvel_ne - tensionsw = -cyp * tmp_uvel_ne + dyt(iw) * tmp_uvel_se & - + cxp * tmp_vvel_ne - dxt(iw) * tmp_vvel_ee - tensionse = -cym * tmp_uvel_se - dyt(iw) * tmp_uvel_ne & - + cxp * tmp_vvel_se - dxt(iw) * vvel(iw) + tensionne = -cym * uvel(iw) - dyT(iw) * tmp_uvel_ee & + + cxm * vvel(iw) + dxT(iw) * tmp_vvel_se + tensionnw = -cyp * tmp_uvel_ee + dyT(iw) * uvel(iw) & + + cxm * tmp_vvel_ee + dxT(iw) * tmp_vvel_ne + tensionsw = -cyp * tmp_uvel_ne + dyT(iw) * tmp_uvel_se & + + cxp * tmp_vvel_ne - dxT(iw) * tmp_vvel_ee + tensionse = -cym * tmp_uvel_se - dyT(iw) * tmp_uvel_ne & + + cxp * tmp_vvel_se - dxT(iw) * vvel(iw) ! shearing strain rate = 2 * e_12 - shearne = -cym * vvel(iw) - dyt(iw) * tmp_vvel_ee & - - cxm * uvel(iw) - dxt(iw) * tmp_uvel_se - shearnw = -cyp * tmp_vvel_ee + dyt(iw) * vvel(iw) & - - cxm * tmp_uvel_ee - dxt(iw) * tmp_uvel_ne - shearsw = -cyp * tmp_vvel_ne + dyt(iw) * tmp_vvel_se & - - cxp * tmp_uvel_ne + dxt(iw) * tmp_uvel_ee - shearse = -cym * tmp_vvel_se - dyt(iw) * tmp_vvel_ne & - - cxp * tmp_uvel_se + dxt(iw) * uvel(iw) + shearne = -cym * vvel(iw) - dyT(iw) * tmp_vvel_ee & + - cxm * uvel(iw) - dxT(iw) * tmp_uvel_se + shearnw = -cyp * tmp_vvel_ee + dyT(iw) * vvel(iw) & + - cxm * tmp_uvel_ee - dxT(iw) * tmp_uvel_ne + shearsw = -cyp * tmp_vvel_ne + dyT(iw) * tmp_vvel_se & + - cxp * tmp_uvel_ne + dxT(iw) * tmp_uvel_ee + shearse = -cym * tmp_vvel_se - dyT(iw) * tmp_vvel_ne & + - cxp * tmp_uvel_se + dxT(iw) * uvel(iw) ! Delta (in the denominator of zeta and eta) Deltane = sqrt(divune**2 + ecci * (tensionne**2 + shearne**2)) @@ -613,17 +613,17 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & csig12sw = p222 * stress12_3(iw) + ssig122 + p055 * stress12_1(iw) csig12se = p222 * stress12_4(iw) + ssig121 + p055 * stress12_2(iw) - str12ew = p5 * dxt(iw) * (p333 * ssig12e + p166 * ssig12w) - str12we = p5 * dxt(iw) * (p333 * ssig12w + p166 * ssig12e) - str12ns = p5 * dyt(iw) * (p333 * ssig12n + p166 * ssig12s) - str12sn = p5 * dyt(iw) * (p333 * ssig12s + p166 * ssig12n) + str12ew = p5 * dxT(iw) * (p333 * ssig12e + p166 * ssig12w) + str12we = p5 * dxT(iw) * (p333 * ssig12w + p166 * ssig12e) + str12ns = p5 * dyT(iw) * (p333 * ssig12n + p166 * ssig12s) + str12sn = p5 * dyT(iw) * (p333 * ssig12s + p166 * ssig12n) !-------------------------------------------------------------- ! for dF/dx (u momentum) !-------------------------------------------------------------- - strp_tmp = p25 * dyt(iw) * (p333 * ssigpn + p166 * ssigps) - strm_tmp = p25 * dyt(iw) * (p333 * ssigmn + p166 * ssigms) + strp_tmp = p25 * dyT(iw) * (p333 * ssigpn + p166 * ssigps) + strm_tmp = p25 * dyT(iw) * (p333 * ssigmn + p166 * ssigms) ! northeast (i,j) str1(iw) = -strp_tmp - strm_tmp - str12ew & @@ -633,8 +633,8 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & str2(iw) = strp_tmp + strm_tmp - str12we & + dxhy * (-csigpnw + csigmnw) + dyhx * csig12nw - strp_tmp = p25 * dyt(iw) * (p333 * ssigps + p166 * ssigpn) - strm_tmp = p25 * dyt(iw) * (p333 * ssigms + p166 * ssigmn) + strp_tmp = p25 * dyT(iw) * (p333 * ssigps + p166 * ssigpn) + strm_tmp = p25 * dyT(iw) * (p333 * ssigms + p166 * ssigmn) ! southeast (i,j+1) str3(iw) = -strp_tmp - strm_tmp + str12ew & @@ -648,8 +648,8 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & ! 
for dF/dy (v momentum) !-------------------------------------------------------------- - strp_tmp = p25 * dxt(iw) * (p333 * ssigpe + p166 * ssigpw) - strm_tmp = p25 * dxt(iw) * (p333 * ssigme + p166 * ssigmw) + strp_tmp = p25 * dxT(iw) * (p333 * ssigpe + p166 * ssigpw) + strm_tmp = p25 * dxT(iw) * (p333 * ssigme + p166 * ssigmw) ! northeast (i,j) str5(iw) = -strp_tmp + strm_tmp - str12ns & @@ -659,8 +659,8 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & str6(iw) = strp_tmp - strm_tmp - str12sn & - dyhx * (csigpse + csigmse) + dxhy * csig12se - strp_tmp = p25 * dxt(iw) * (p333 * ssigpw + p166 * ssigpe) - strm_tmp = p25 * dxt(iw) * (p333 * ssigmw + p166 * ssigme) + strp_tmp = p25 * dxT(iw) * (p333 * ssigpw + p166 * ssigpe) + strm_tmp = p25 * dxT(iw) * (p333 * ssigmw + p166 * ssigme) ! northwest (i+1,j) str7(iw) = -strp_tmp + strm_tmp + str12ns & @@ -937,7 +937,7 @@ subroutine alloc1d(na) ! grid distances and their "-1 neighbours" HTE(1:na), HTN(1:na), HTEm1(1:na), HTNm1(1:na), & ! T cells - strength(1:na), dxt(1:na), dyt(1:na), tarear(1:na), & + strength(1:na), dxT(1:na), dyT(1:na), tarear(1:na), & stressp_1(1:na), stressp_2(1:na), stressp_3(1:na), & stressp_4(1:na), stressm_1(1:na), stressm_2(1:na), & stressm_3(1:na), stressm_4(1:na), stress12_1(1:na), & @@ -998,7 +998,7 @@ subroutine dealloc1d ! grid distances and their "-1 neighbours" HTE, HTN, HTEm1, HTNm1, & ! T cells - strength, dxt, dyt, tarear, stressp_1, stressp_2, stressp_3, & + strength, dxT, dyT, tarear, stressp_1, stressp_2, stressp_3, & stressp_4, stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, str1, str2, & str3, str4, str5, str6, str7, str8, divu, rdg_conv, & @@ -1021,7 +1021,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & I_icetmask, I_iceumask, I_cdn_ocn, I_aiu, I_uocn, I_vocn, & I_forcex, I_forcey, I_Tbu, I_umassdti, I_fm, I_uarear, I_tarear, & I_strintx, I_strinty, I_uvel_init, I_vvel_init, I_strength, & - I_uvel, I_vvel, I_dxt, I_dyt, I_stressp_1, I_stressp_2, & + I_uvel, I_vvel, I_dxT, I_dyT, I_stressp_1, I_stressp_2, & I_stressp_3, I_stressp_4, I_stressm_1, I_stressm_2, I_stressm_3, & I_stressm_4, I_stress12_1, I_stress12_2, I_stress12_3, & I_stress12_4) @@ -1042,8 +1042,8 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & real(kind=dbl_kind), dimension(nx, ny, nblk), intent(in) :: & I_cdn_ocn, I_aiu, I_uocn, I_vocn, I_forcex, I_forcey, I_Tbu, & I_umassdti, I_fm, I_uarear, I_tarear, I_strintx, I_strinty, & - I_uvel_init, I_vvel_init, I_strength, I_uvel, I_vvel, I_dxt, & - I_dyt, I_stressp_1, I_stressp_2, I_stressp_3, I_stressp_4, & + I_uvel_init, I_vvel_init, I_strength, I_uvel, I_vvel, I_dxT, & + I_dyT, I_stressp_1, I_stressp_2, I_stressp_3, I_stressp_4, & I_stressm_1, I_stressm_2, I_stressm_3, I_stressm_4, & I_stress12_1, I_stress12_2, I_stress12_3, I_stress12_4 @@ -1056,8 +1056,8 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & real(kind=dbl_kind), dimension(nx_glob, ny_glob) :: & G_cdn_ocn, G_aiu, G_uocn, G_vocn, G_forcex, G_forcey, G_Tbu, & G_umassdti, G_fm, G_uarear, G_tarear, G_strintx, G_strinty, & - G_uvel_init, G_vvel_init, G_strength, G_uvel, G_vvel, G_dxt, & - G_dyt, G_stressp_1, G_stressp_2, G_stressp_3, G_stressp_4, & + G_uvel_init, G_vvel_init, G_strength, G_uvel, G_vvel, G_dxT, & + G_dyT, G_stressp_1, G_stressp_2, G_stressp_3, G_stressp_4, & G_stressm_1, G_stressm_2, G_stressm_3, G_stressm_4, & G_stress12_1, G_stress12_2, G_stress12_3, G_stress12_4 @@ -1084,8 +1084,8 @@ 
subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & call gather_global_ext(G_strength, I_strength, master_task, distrb_info ) call gather_global_ext(G_uvel, I_uvel, master_task, distrb_info, c0) call gather_global_ext(G_vvel, I_vvel, master_task, distrb_info, c0) - call gather_global_ext(G_dxt, I_dxt, master_task, distrb_info ) - call gather_global_ext(G_dyt, I_dyt, master_task, distrb_info ) + call gather_global_ext(G_dxT, I_dxT, master_task, distrb_info ) + call gather_global_ext(G_dyT, I_dyT, master_task, distrb_info ) call gather_global_ext(G_stressp_1, I_stressp_1, master_task, distrb_info ) call gather_global_ext(G_stressp_2, I_stressp_2, master_task, distrb_info ) call gather_global_ext(G_stressp_3, I_stressp_3, master_task, distrb_info ) @@ -1117,7 +1117,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & G_HTE, G_HTN, G_cdn_ocn, G_aiu, G_uocn, G_vocn, G_forcex, & G_forcey, G_Tbu, G_umassdti, G_fm, G_uarear, G_tarear, & G_strintx, G_strinty, G_uvel_init, G_vvel_init, & - G_strength, G_uvel, G_vvel, G_dxt, G_dyt, G_stressp_1, & + G_strength, G_uvel, G_vvel, G_dxT, G_dyT, G_stressp_1, & G_stressp_2, G_stressp_3, G_stressp_4, G_stressm_1, & G_stressm_2, G_stressm_3, G_stressm_4, G_stress12_1, & G_stress12_2, G_stress12_3, G_stress12_4) @@ -1298,7 +1298,7 @@ subroutine ice_dyn_evp_1d_kernel !$XXXOMP PARALLEL PRIVATE(ksub) do ksub = 1, ndte - 1 call evp1d_stress(NA_len, ee, ne, se, 1, NA_len, uvel, & - vvel, dxt, dyt, hte, htn, htem1, htnm1, strength, & + vvel, dxT, dyT, hte, htn, htem1, htnm1, strength, & stressp_1, stressp_2, stressp_3, stressp_4, stressm_1, & stressm_2, stressm_3, stressm_4, stress12_1, & stress12_2, stress12_3, stress12_4, str1, str2, str3, & @@ -1315,7 +1315,7 @@ subroutine ice_dyn_evp_1d_kernel end do call evp1d_stress(NA_len, ee, ne, se, 1, NA_len, uvel, vvel, & - dxt, dyt, hte, htn, htem1, htnm1, strength, stressp_1, & + dxT, dyT, hte, htn, htem1, htnm1, strength, stressp_1, & stressp_2, stressp_3, stressp_4, stressm_1, stressm_2, & stressm_3, stressm_4, stress12_1, stress12_2, stress12_3, & stress12_4, str1, str2, str3, str4, str5, str6, str7, & @@ -1460,8 +1460,8 @@ end subroutine calc_navel subroutine convert_2d_1d(nx, ny, na, navel, I_HTE, I_HTN, & I_cdn_ocn, I_aiu, I_uocn, I_vocn, I_forcex, I_forcey, I_Tbu, & I_umassdti, I_fm, I_uarear, I_tarear, I_strintx, I_strinty, & - I_uvel_init, I_vvel_init, I_strength, I_uvel, I_vvel, I_dxt, & - I_dyt, I_stressp_1, I_stressp_2, I_stressp_3, I_stressp_4, & + I_uvel_init, I_vvel_init, I_strength, I_uvel, I_vvel, I_dxT, & + I_dyT, I_stressp_1, I_stressp_2, I_stressp_3, I_stressp_4, & I_stressm_1, I_stressm_2, I_stressm_3, I_stressm_4, & I_stress12_1, I_stress12_2, I_stress12_3, I_stress12_4) @@ -1472,7 +1472,7 @@ subroutine convert_2d_1d(nx, ny, na, navel, I_HTE, I_HTN, & I_HTN, I_cdn_ocn, I_aiu, I_uocn, I_vocn, I_forcex, I_forcey, & I_Tbu, I_umassdti, I_fm, I_uarear, I_tarear, I_strintx, & I_strinty, I_uvel_init, I_vvel_init, I_strength, I_uvel, & - I_vvel, I_dxt, I_dyt, I_stressp_1, I_stressp_2, I_stressp_3, & + I_vvel, I_dxT, I_dyT, I_stressp_1, I_stressp_2, I_stressp_3, & I_stressp_4, I_stressm_1, I_stressm_2, I_stressm_3, & I_stressm_4, I_stress12_1, I_stress12_2, I_stress12_3, & I_stress12_4 @@ -1554,8 +1554,8 @@ subroutine convert_2d_1d(nx, ny, na, navel, I_HTE, I_HTN, & uvel_init(iw) = I_uvel_init(i, j) vvel_init(iw) = I_vvel_init(i, j) strength(iw) = I_strength(i, j) - dxt(iw) = I_dxt(i, j) - dyt(iw) = I_dyt(i, j) + dxT(iw) = I_dxT(i, j) + dyT(iw) = I_dyT(i, j) stressp_1(iw) = 
I_stressp_1(i, j) stressp_2(iw) = I_stressp_2(i, j) stressp_3(iw) = I_stressp_3(i, j) @@ -1875,8 +1875,8 @@ subroutine numainit(l, u, uu) vvel_init(lo:up) = c0 uocn(lo:up) = c0 vocn(lo:up) = c0 - dxt(lo:up) = c0 - dyt(lo:up) = c0 + dxT(lo:up) = c0 + dyT(lo:up) = c0 HTE(lo:up) = c0 HTN(lo:up) = c0 HTEm1(lo:up) = c0 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index bc7f3abb1..722022fad 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -38,7 +38,7 @@ module ice_dyn_shared integer (kind=int_kind), public :: & kdyn , & ! type of dynamics ( -1, 0 = off, 1 = evp, 2 = eap ) kridge , & ! set to "-1" to turn off ridging - ndte ! number of subcycles: ndte=dt/dte + ndte ! number of subcycles character (len=char_len), public :: & coriolis , & ! 'constant', 'zero', or 'latitude' @@ -79,8 +79,7 @@ module ice_dyn_shared deltaminEVP , & ! minimum delta for viscosities (EVP) deltaminVP , & ! minimum delta for viscosities (VP) capping , & ! capping of viscosities (1=Hibler79, 0=Kreyscher2000) - dtei , & ! 1/dte, where dte is subcycling timestep (1/s) -! dte2T , & ! dte/2T + dtei , & ! ndte/dt, where dt/ndte is subcycling timestep (1/s) denom1 ! constants for stress equation real (kind=dbl_kind), public :: & ! Bouillon et al relaxation constants @@ -223,7 +222,7 @@ subroutine init_dyn (dt) if (my_task == master_task) then write(nu_diag,*) 'dt = ',dt - write(nu_diag,*) 'dte = ',dt/real(ndte,kind=dbl_kind) + write(nu_diag,*) 'dt_subcyle = ',dt/real(ndte,kind=dbl_kind) write(nu_diag,*) 'tdamp =', elasticDamp * dt write(nu_diag,*) 'halo_dynbundle =', halo_dynbundle endif @@ -335,15 +334,9 @@ subroutine set_evp_parameters (dt) ! local variables - !real (kind=dbl_kind) :: & - !dte , & ! subcycling timestep for EVP dynamics, s - !tdamp2 ! 2*(wave damping time scale T) - character(len=*), parameter :: subname = '(set_evp_parameters)' ! elastic time step - !dte = dt/real(ndte,kind=dbl_kind) ! s - !dtei = c1/dte ! 1/s dtei = real(ndte,kind=dbl_kind)/dt ! variables for elliptical yield curve and plastic potential @@ -351,19 +344,12 @@ subroutine set_evp_parameters (dt) e_factor = e_yieldcurve**2 / e_plasticpot**4 ecci = c1/e_yieldcurve**2 ! temporary for 1d evp - ! constants for stress equation - !tdamp2 = c2 * elasticDamp * dt ! s - !dte2T = dte/tdamp2 or c1/(c2*elasticDamp*real(ndte,kind=dbl_kind)) ! ellipse (unitless) - if (revised_evp) then ! Bouillon et al, Ocean Mod 2013 revp = c1 denom1 = c1 arlx1i = c1/arlx else ! Hunke, JCP 2013 with modified stress eq revp = c0 - !arlx1i = dte2T - !arlx = c1/arlx1i - !brlx = dt*dtei arlx = c2 * elasticDamp * real(ndte,kind=dbl_kind) arlx1i = c1/arlx brlx = real(ndte,kind=dbl_kind) @@ -646,13 +632,9 @@ subroutine dyn_prep2 (nx_block, ny_block, & do j = jlo, jhi do i = ilo, ihi iceumask_old(i,j) = iceumask(i,j) ! save -! if (grid_ice == 'B') then ! include ice mask. ! ice extent mask (U-cells) iceumask(i,j) = (umask(i,j)) .and. (aiu (i,j) > a_min) & - .and. (umass(i,j) > m_min) -! else ! ice mask shpuld be applied to cd grid. For now it is not implemented. -! iceumask(i,j) = umask(i,j) -! endif + .and. 
(umass(i,j) > m_min) if (iceumask(i,j)) then icellu = icellu + 1 @@ -1639,7 +1621,7 @@ subroutine deformations (nx_block, ny_block, & icellt, & indxti, indxtj, & uvel, vvel, & - dxt, dyt, & + dxT, dyT, & cxp, cyp, & cxm, cym, & tarear, & @@ -1659,8 +1641,8 @@ subroutine deformations (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) vvel , & ! y-component of velocity (m/s) - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) cyp , & ! 1.5*HTE - 0.5*HTW cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW @@ -1698,7 +1680,7 @@ subroutine deformations (nx_block, ny_block, & call strain_rates (nx_block, ny_block, & i, j, & uvel, vvel, & - dxt, dyt, & + dxT, dyT, & cxp, cyp, & cxm, cym, & divune, divunw, & @@ -1943,7 +1925,7 @@ end subroutine deformationsC_T subroutine strain_rates (nx_block, ny_block, & i, j, & uvel, vvel, & - dxt, dyt, & + dxT, dyT, & cxp, cyp, & cxm, cym, & divune, divunw, & @@ -1964,8 +1946,8 @@ subroutine strain_rates (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) vvel , & ! y-component of velocity (m/s) - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) cyp , & ! 1.5*HTE - 0.5*HTW cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW @@ -1985,34 +1967,34 @@ subroutine strain_rates (nx_block, ny_block, & !----------------------------------------------------------------- ! divergence = e_11 + e_22 - divune = cyp(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & - + cxp(i,j)*vvel(i ,j ) - dxt(i,j)*vvel(i ,j-1) - divunw = cym(i,j)*uvel(i-1,j ) + dyt(i,j)*uvel(i ,j ) & - + cxp(i,j)*vvel(i-1,j ) - dxt(i,j)*vvel(i-1,j-1) - divusw = cym(i,j)*uvel(i-1,j-1) + dyt(i,j)*uvel(i ,j-1) & - + cxm(i,j)*vvel(i-1,j-1) + dxt(i,j)*vvel(i-1,j ) - divuse = cyp(i,j)*uvel(i ,j-1) - dyt(i,j)*uvel(i-1,j-1) & - + cxm(i,j)*vvel(i ,j-1) + dxt(i,j)*vvel(i ,j ) + divune = cyp(i,j)*uvel(i ,j ) - dyT(i,j)*uvel(i-1,j ) & + + cxp(i,j)*vvel(i ,j ) - dxT(i,j)*vvel(i ,j-1) + divunw = cym(i,j)*uvel(i-1,j ) + dyT(i,j)*uvel(i ,j ) & + + cxp(i,j)*vvel(i-1,j ) - dxT(i,j)*vvel(i-1,j-1) + divusw = cym(i,j)*uvel(i-1,j-1) + dyT(i,j)*uvel(i ,j-1) & + + cxm(i,j)*vvel(i-1,j-1) + dxT(i,j)*vvel(i-1,j ) + divuse = cyp(i,j)*uvel(i ,j-1) - dyT(i,j)*uvel(i-1,j-1) & + + cxm(i,j)*vvel(i ,j-1) + dxT(i,j)*vvel(i ,j ) ! 
tension strain rate = e_11 - e_22 - tensionne = -cym(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & - + cxm(i,j)*vvel(i ,j ) + dxt(i,j)*vvel(i ,j-1) - tensionnw = -cyp(i,j)*uvel(i-1,j ) + dyt(i,j)*uvel(i ,j ) & - + cxm(i,j)*vvel(i-1,j ) + dxt(i,j)*vvel(i-1,j-1) - tensionsw = -cyp(i,j)*uvel(i-1,j-1) + dyt(i,j)*uvel(i ,j-1) & - + cxp(i,j)*vvel(i-1,j-1) - dxt(i,j)*vvel(i-1,j ) - tensionse = -cym(i,j)*uvel(i ,j-1) - dyt(i,j)*uvel(i-1,j-1) & - + cxp(i,j)*vvel(i ,j-1) - dxt(i,j)*vvel(i ,j ) + tensionne = -cym(i,j)*uvel(i ,j ) - dyT(i,j)*uvel(i-1,j ) & + + cxm(i,j)*vvel(i ,j ) + dxT(i,j)*vvel(i ,j-1) + tensionnw = -cyp(i,j)*uvel(i-1,j ) + dyT(i,j)*uvel(i ,j ) & + + cxm(i,j)*vvel(i-1,j ) + dxT(i,j)*vvel(i-1,j-1) + tensionsw = -cyp(i,j)*uvel(i-1,j-1) + dyT(i,j)*uvel(i ,j-1) & + + cxp(i,j)*vvel(i-1,j-1) - dxT(i,j)*vvel(i-1,j ) + tensionse = -cym(i,j)*uvel(i ,j-1) - dyT(i,j)*uvel(i-1,j-1) & + + cxp(i,j)*vvel(i ,j-1) - dxT(i,j)*vvel(i ,j ) ! shearing strain rate = 2*e_12 - shearne = -cym(i,j)*vvel(i ,j ) - dyt(i,j)*vvel(i-1,j ) & - - cxm(i,j)*uvel(i ,j ) - dxt(i,j)*uvel(i ,j-1) - shearnw = -cyp(i,j)*vvel(i-1,j ) + dyt(i,j)*vvel(i ,j ) & - - cxm(i,j)*uvel(i-1,j ) - dxt(i,j)*uvel(i-1,j-1) - shearsw = -cyp(i,j)*vvel(i-1,j-1) + dyt(i,j)*vvel(i ,j-1) & - - cxp(i,j)*uvel(i-1,j-1) + dxt(i,j)*uvel(i-1,j ) - shearse = -cym(i,j)*vvel(i ,j-1) - dyt(i,j)*vvel(i-1,j-1) & - - cxp(i,j)*uvel(i ,j-1) + dxt(i,j)*uvel(i ,j ) + shearne = -cym(i,j)*vvel(i ,j ) - dyT(i,j)*vvel(i-1,j ) & + - cxm(i,j)*uvel(i ,j ) - dxT(i,j)*uvel(i ,j-1) + shearnw = -cyp(i,j)*vvel(i-1,j ) + dyT(i,j)*vvel(i ,j ) & + - cxm(i,j)*uvel(i-1,j ) - dxT(i,j)*uvel(i-1,j-1) + shearsw = -cyp(i,j)*vvel(i-1,j-1) + dyT(i,j)*vvel(i ,j-1) & + - cxp(i,j)*uvel(i-1,j-1) + dxT(i,j)*uvel(i-1,j ) + shearse = -cym(i,j)*vvel(i ,j-1) - dyT(i,j)*vvel(i-1,j-1) & + - cxp(i,j)*uvel(i ,j-1) + dxT(i,j)*uvel(i ,j ) ! 
Delta (in the denominator of zeta, eta) Deltane = sqrt(divune**2 + e_factor*(tensionne**2 + shearne**2)) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 4796669e9..36a273c01 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -52,7 +52,7 @@ module ice_dyn_vp use ice_fileunits, only: nu_diag use ice_flux, only: fm use ice_global_reductions, only: global_sum, global_allreduce_sum - use ice_grid, only: dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, uarear + use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_ice_strength, icepack_query_parameters @@ -174,7 +174,7 @@ subroutine implicit_solver (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxt, dyt, cxp, cyp, cxm, cym, & + use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & tarear, grid_type, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & @@ -497,7 +497,7 @@ subroutine implicit_solver (dt) icellt(iblk) , & indxti (:,iblk), indxtj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - dxt (:,:,iblk), dyt (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & @@ -520,7 +520,7 @@ subroutine implicit_solver (dt) icellt(iblk) , & indxti (:,iblk), indxtj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - dxt (:,:,iblk), dyt (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & tarear (:,:,iblk), & @@ -699,7 +699,7 @@ subroutine anderson_solver (icellt , icellu, & use ice_domain, only: maskhalo_dyn, halo_info use ice_domain_size, only: max_blocks use ice_flux, only: fm, Tbu - use ice_grid, only: dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & + use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & uarear use ice_dyn_shared, only: DminTarea use ice_state, only: uvel, vvel, strength @@ -847,7 +847,7 @@ subroutine anderson_solver (icellt , icellu, & icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & uprev_k (:,:,iblk), vprev_k (:,:,iblk), & - dxt (:,:,iblk), dyt (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & @@ -878,7 +878,7 @@ subroutine anderson_solver (icellt , icellu, & icellu (iblk) , icellt (iblk), & indxui (:,iblk) , indxuj (:,iblk), & indxti (:,iblk) , indxtj (:,iblk), & - dxt (:,:,iblk) , dyt (:,:,iblk), & + dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & cxm (:,:,iblk) , cym (:,:,iblk), & @@ -934,7 +934,7 @@ subroutine anderson_solver (icellt , icellu, & call formDiag_step1 (nx_block , ny_block , & icellu (iblk) , & indxui (:,iblk) , indxuj(:,iblk), & - dxt (:,:,iblk) , dyt (:,:,iblk), & + dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx(:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & cxm (:,:,iblk) , cym (:,:,iblk), & @@ -1149,7 +1149,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & icellt , & indxti , indxtj , & uvel , vvel , & - dxt , dyt , & + dxT , dyT , & dxhy , dyhx , & cxp , cyp , & cxm , cym , & 
@@ -1172,8 +1172,8 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & strength , & ! ice strength (N/m) uvel , & ! x-component of velocity (m/s) vvel , & ! y-component of velocity (m/s) - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) dxhy , & ! 0.5*(HTE - HTW) dyhx , & ! 0.5*(HTN - HTS) cyp , & ! 1.5*HTE - 0.5*HTW @@ -1224,7 +1224,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & call strain_rates (nx_block , ny_block , & i , j , & uvel , vvel , & - dxt , dyt , & + dxT , dyT , & cxp , cyp , & cxm , cym , & divune , divunw , & @@ -1286,7 +1286,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & !----------------------------------------------------------------- ! for dF/dx (u momentum) !----------------------------------------------------------------- - strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) + strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) ! northeast (i,j) stPr(i,j,1) = -strp_tmp & @@ -1296,7 +1296,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & stPr(i,j,2) = strp_tmp & + dxhy(i,j)*(-csigpnw) - strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) + strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) ! southeast (i,j+1) stPr(i,j,3) = -strp_tmp & @@ -1309,7 +1309,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & !----------------------------------------------------------------- ! for dF/dy (v momentum) !----------------------------------------------------------------- - strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) + strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) ! northeast (i,j) stPr(i,j,5) = -strp_tmp & @@ -1319,7 +1319,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & stPr(i,j,6) = strp_tmp & - dyhx(i,j)*(csigpse) - strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) + strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) ! northwest (i+1,j) stPr(i,j,7) = -strp_tmp & @@ -1344,7 +1344,7 @@ subroutine stress_vp (nx_block , ny_block , & icellt , & indxti , indxtj , & uvel , vvel , & - dxt , dyt , & + dxT , dyT , & cxp , cyp , & cxm , cym , & zetax2 , etax2 , & @@ -1369,8 +1369,8 @@ subroutine stress_vp (nx_block , ny_block , & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) vvel , & ! y-component of velocity (m/s) - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) cyp , & ! 1.5*HTE - 0.5*HTW cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW @@ -1410,7 +1410,7 @@ subroutine stress_vp (nx_block , ny_block , & call strain_rates (nx_block , ny_block , & i , j , & uvel , vvel , & - dxt , dyt , & + dxT , dyT , & cxp , cyp , & cxm , cym , & divune , divunw , & @@ -1566,7 +1566,7 @@ subroutine matvec (nx_block, ny_block, & icellu , icellt , & indxui , indxuj , & indxti , indxtj , & - dxt , dyt , & + dxT , dyT , & dxhy , dyhx , & cxp , cyp , & cxm , cym , & @@ -1591,8 +1591,8 @@ subroutine matvec (nx_block, ny_block, & indxtj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) dxhy , & ! 0.5*(HTE - HTW) dyhx , & ! 
0.5*(HTN - HTS) cyp , & ! 1.5*HTE - 0.5*HTW @@ -1668,7 +1668,7 @@ subroutine matvec (nx_block, ny_block, & call strain_rates (nx_block , ny_block , & i , j , & uvel , vvel , & - dxt , dyt , & + dxT , dyT , & cxp , cyp , & cxm , cym , & divune , divunw , & @@ -1745,16 +1745,16 @@ subroutine matvec (nx_block, ny_block, & csig12se = p222*stress12_4 + ssig121 & + p055*stress12_2 - str12ew = p5*dxt(i,j)*(p333*ssig12e + p166*ssig12w) - str12we = p5*dxt(i,j)*(p333*ssig12w + p166*ssig12e) - str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) - str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) + str12ew = p5*dxT(i,j)*(p333*ssig12e + p166*ssig12w) + str12we = p5*dxT(i,j)*(p333*ssig12w + p166*ssig12e) + str12ns = p5*dyT(i,j)*(p333*ssig12n + p166*ssig12s) + str12sn = p5*dyT(i,j)*(p333*ssig12s + p166*ssig12n) !----------------------------------------------------------------- ! for dF/dx (u momentum) !----------------------------------------------------------------- - strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) + strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) + strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) ! northeast (i,j) str(i,j,1) = -strp_tmp - strm_tmp - str12ew & @@ -1764,8 +1764,8 @@ subroutine matvec (nx_block, ny_block, & str(i,j,2) = strp_tmp + strm_tmp - str12we & + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw - strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) - strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) + strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) + strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn) ! southeast (i,j+1) str(i,j,3) = -strp_tmp - strm_tmp + str12ew & @@ -1778,8 +1778,8 @@ subroutine matvec (nx_block, ny_block, & !----------------------------------------------------------------- ! for dF/dy (v momentum) !----------------------------------------------------------------- - strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) - strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) + strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) + strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw) ! northeast (i,j) str(i,j,5) = -strp_tmp + strm_tmp - str12ns & @@ -1789,8 +1789,8 @@ subroutine matvec (nx_block, ny_block, & str(i,j,6) = strp_tmp - strm_tmp - str12sn & - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se - strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) - strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) + strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) + strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme) ! northwest (i+1,j) str(i,j,7) = -strp_tmp + strm_tmp + str12ns & @@ -2026,7 +2026,7 @@ end subroutine residual_vec subroutine formDiag_step1 (nx_block, ny_block, & icellu , & indxui , indxuj , & - dxt , dyt , & + dxT , dyT , & dxhy , dyhx , & cxp , cyp , & cxm , cym , & @@ -2042,8 +2042,8 @@ subroutine formDiag_step1 (nx_block, ny_block, & indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) dxhy , & ! 0.5*(HTE - HTW) dyhx , & ! 0.5*(HTN - HTS) cyp , & ! 1.5*HTE - 0.5*HTW @@ -2207,34 +2207,34 @@ subroutine formDiag_step1 (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- ! 
divergence = e_11 + e_22 - divune = cyp(i,j)*uij - dyt(i,j)*ui1j & - + cxp(i,j)*vij - dxt(i,j)*vij1 - divunw = cym(i,j)*ui1j + dyt(i,j)*uij & - + cxp(i,j)*vi1j - dxt(i,j)*vi1j1 - divusw = cym(i,j)*ui1j1 + dyt(i,j)*uij1 & - + cxm(i,j)*vi1j1 + dxt(i,j)*vi1j - divuse = cyp(i,j)*uij1 - dyt(i,j)*ui1j1 & - + cxm(i,j)*vij1 + dxt(i,j)*vij + divune = cyp(i,j)*uij - dyT(i,j)*ui1j & + + cxp(i,j)*vij - dxT(i,j)*vij1 + divunw = cym(i,j)*ui1j + dyT(i,j)*uij & + + cxp(i,j)*vi1j - dxT(i,j)*vi1j1 + divusw = cym(i,j)*ui1j1 + dyT(i,j)*uij1 & + + cxm(i,j)*vi1j1 + dxT(i,j)*vi1j + divuse = cyp(i,j)*uij1 - dyT(i,j)*ui1j1 & + + cxm(i,j)*vij1 + dxT(i,j)*vij ! tension strain rate = e_11 - e_22 - tensionne = -cym(i,j)*uij - dyt(i,j)*ui1j & - + cxm(i,j)*vij + dxt(i,j)*vij1 - tensionnw = -cyp(i,j)*ui1j + dyt(i,j)*uij & - + cxm(i,j)*vi1j + dxt(i,j)*vi1j1 - tensionsw = -cyp(i,j)*ui1j1 + dyt(i,j)*uij1 & - + cxp(i,j)*vi1j1 - dxt(i,j)*vi1j - tensionse = -cym(i,j)*uij1 - dyt(i,j)*ui1j1 & - + cxp(i,j)*vij1 - dxt(i,j)*vij + tensionne = -cym(i,j)*uij - dyT(i,j)*ui1j & + + cxm(i,j)*vij + dxT(i,j)*vij1 + tensionnw = -cyp(i,j)*ui1j + dyT(i,j)*uij & + + cxm(i,j)*vi1j + dxT(i,j)*vi1j1 + tensionsw = -cyp(i,j)*ui1j1 + dyT(i,j)*uij1 & + + cxp(i,j)*vi1j1 - dxT(i,j)*vi1j + tensionse = -cym(i,j)*uij1 - dyT(i,j)*ui1j1 & + + cxp(i,j)*vij1 - dxT(i,j)*vij ! shearing strain rate = 2*e_12 - shearne = -cym(i,j)*vij - dyt(i,j)*vi1j & - - cxm(i,j)*uij - dxt(i,j)*uij1 - shearnw = -cyp(i,j)*vi1j + dyt(i,j)*vij & - - cxm(i,j)*ui1j - dxt(i,j)*ui1j1 - shearsw = -cyp(i,j)*vi1j1 + dyt(i,j)*vij1 & - - cxp(i,j)*ui1j1 + dxt(i,j)*ui1j - shearse = -cym(i,j)*vij1 - dyt(i,j)*vi1j1 & - - cxp(i,j)*uij1 + dxt(i,j)*uij + shearne = -cym(i,j)*vij - dyT(i,j)*vi1j & + - cxm(i,j)*uij - dxT(i,j)*uij1 + shearnw = -cyp(i,j)*vi1j + dyT(i,j)*vij & + - cxm(i,j)*ui1j - dxT(i,j)*ui1j1 + shearsw = -cyp(i,j)*vi1j1 + dyT(i,j)*vij1 & + - cxp(i,j)*ui1j1 + dxT(i,j)*ui1j + shearse = -cym(i,j)*vij1 - dyT(i,j)*vi1j1 & + - cxp(i,j)*uij1 + dxT(i,j)*uij !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -2300,10 +2300,10 @@ subroutine formDiag_step1 (nx_block, ny_block, & csig12se = p222*stress12_4 + ssig121 & + p055*stress12_2 - str12ew = p5*dxt(i,j)*(p333*ssig12e + p166*ssig12w) - str12we = p5*dxt(i,j)*(p333*ssig12w + p166*ssig12e) - str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) - str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) + str12ew = p5*dxT(i,j)*(p333*ssig12e + p166*ssig12w) + str12we = p5*dxT(i,j)*(p333*ssig12w + p166*ssig12e) + str12ns = p5*dyT(i,j)*(p333*ssig12n + p166*ssig12s) + str12sn = p5*dyT(i,j)*(p333*ssig12s + p166*ssig12n) !----------------------------------------------------------------- ! for dF/dx (u momentum) @@ -2311,8 +2311,8 @@ subroutine formDiag_step1 (nx_block, ny_block, & if (cc == 1) then ! T cell i,j - strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) + strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) + strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) ! northeast (i,j) Drheo(iu,ju,1) = -strp_tmp - strm_tmp - str12ew & @@ -2320,8 +2320,8 @@ subroutine formDiag_step1 (nx_block, ny_block, & elseif (cc == 2) then ! T cell i+1,j - strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) + strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) + strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) ! 
northwest (i+1,j)
            Drheo(iu,ju,2) = strp_tmp + strm_tmp - str12we &
@@ -2329,8 +2329,8 @@ subroutine formDiag_step1 (nx_block, ny_block, &
 
          elseif (cc == 3) then ! T cell i,j+1
 
-            strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn)
-            strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn)
+            strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn)
+            strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn)
 
             ! southeast (i,j+1)
             Drheo(iu,ju,3) = -strp_tmp - strm_tmp + str12ew &
@@ -2338,8 +2338,8 @@ subroutine formDiag_step1 (nx_block, ny_block, &
 
          elseif (cc == 4) then ! T cell i+1,j+1
 
-            strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn)
-            strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn)
+            strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn)
+            strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn)
 
             ! southwest (i+1,j+1)
             Drheo(iu,ju,4) = strp_tmp + strm_tmp + str12we &
@@ -2351,8 +2351,8 @@ subroutine formDiag_step1 (nx_block, ny_block, &
 
          elseif (cc == 5) then ! T cell i,j
 
-            strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw)
-            strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw)
+            strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw)
+            strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw)
 
             ! northeast (i,j)
             Drheo(iu,ju,5) = -strp_tmp + strm_tmp - str12ns &
@@ -2360,8 +2360,8 @@ subroutine formDiag_step1 (nx_block, ny_block, &
 
          elseif (cc == 6) then ! T cell i,j+1
 
-            strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw)
-            strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw)
+            strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw)
+            strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw)
 
             ! southeast (i,j+1)
             Drheo(iu,ju,6) = strp_tmp - strm_tmp - str12sn &
@@ -2369,8 +2369,8 @@ subroutine formDiag_step1 (nx_block, ny_block, &
 
          elseif (cc == 7) then ! T cell i+1,j
 
-            strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe)
-            strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme)
+            strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe)
+            strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme)
 
             ! northwest (i+1,j)
             Drheo(iu,ju,7) = -strp_tmp + strm_tmp + str12ns &
@@ -2378,8 +2378,8 @@ subroutine formDiag_step1 (nx_block, ny_block, &
 
          elseif (cc == 8) then ! T cell i+1,j+1
 
-            strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe)
-            strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme)
+            strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe)
+            strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme)
 
             ! 
southwest (i+1,j+1)
            Drheo(iu,ju,8) = strp_tmp - strm_tmp + str12sn &
@@ -2820,7 +2820,7 @@ subroutine fgmres (zetax2   , etax2   , &
                          icellu  (iblk)     , icellt  (iblk), &
                          indxui  (:,iblk)   , indxuj  (:,iblk), &
                          indxti  (:,iblk)   , indxtj  (:,iblk), &
-                         dxt     (:,:,iblk) , dyt     (:,:,iblk), &
+                         dxT     (:,:,iblk) , dyT     (:,:,iblk), &
                          dxhy    (:,:,iblk) , dyhx    (:,:,iblk), &
                          cxp     (:,:,iblk) , cyp     (:,:,iblk), &
                          cxm     (:,:,iblk) , cym     (:,:,iblk), &
@@ -2927,7 +2927,7 @@ subroutine fgmres (zetax2   , etax2   , &
                          icellu  (iblk)     , icellt  (iblk), &
                          indxui  (:,iblk)   , indxuj  (:,iblk), &
                          indxti  (:,iblk)   , indxtj  (:,iblk), &
-                         dxt     (:,:,iblk) , dyt     (:,:,iblk), &
+                         dxT     (:,:,iblk) , dyT     (:,:,iblk), &
                          dxhy    (:,:,iblk) , dyhx    (:,:,iblk), &
                          cxp     (:,:,iblk) , cyp     (:,:,iblk), &
                          cxm     (:,:,iblk) , cym     (:,:,iblk), &
@@ -3213,7 +3213,7 @@ subroutine pgmres (zetax2   , etax2   , &
                          icellu  (iblk)     , icellt  (iblk), &
                          indxui  (:,iblk)   , indxuj  (:,iblk), &
                          indxti  (:,iblk)   , indxtj  (:,iblk), &
-                         dxt     (:,:,iblk) , dyt     (:,:,iblk), &
+                         dxT     (:,:,iblk) , dyT     (:,:,iblk), &
                          dxhy    (:,:,iblk) , dyhx    (:,:,iblk), &
                          cxp     (:,:,iblk) , cyp     (:,:,iblk), &
                          cxm     (:,:,iblk) , cym     (:,:,iblk), &
@@ -3309,7 +3309,7 @@ subroutine pgmres (zetax2   , etax2   , &
                          icellu  (iblk)     , icellt  (iblk), &
                          indxui  (:,iblk)   , indxuj  (:,iblk), &
                          indxti  (:,iblk)   , indxtj  (:,iblk), &
-                         dxt     (:,:,iblk) , dyt     (:,:,iblk), &
+                         dxT     (:,:,iblk) , dyT     (:,:,iblk), &
                          dxhy    (:,:,iblk) , dyhx    (:,:,iblk), &
                          cxp     (:,:,iblk) , cyp     (:,:,iblk), &
                          cxm     (:,:,iblk) , cym     (:,:,iblk), &
diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90
index 9c9fa25d4..c3bf4cd15 100644
--- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90
+++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90
@@ -302,7 +302,7 @@ subroutine transport_remap (dt)
          indxinc, indxjnc    ! compressed i/j indices
 
       integer (kind=int_kind) :: &
-         ntrcr               !
+         ntrcr               ! number of tracers
 
       type (block) :: &
          this_block          ! block information for current block
@@ -927,8 +927,8 @@ subroutine state_to_tracers (nx_block, ny_block, &
 
       real (kind=dbl_kind) :: &
          puny             , & !
-         rhos             , & !
-         Lfresh           , & !
+         rhos             , & ! snow density (kg/m^3)
+         Lfresh           , & ! latent heat of melting fresh ice (J/kg)
          w1                   ! work variable
 
       integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat) :: &
diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90
index 95ae33613..330816529 100644
--- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90
+++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90
@@ -255,7 +255,7 @@ subroutine init_remap
       use ice_domain, only: nblocks
       use ice_grid, only: xav, yav, xxav, yyav
-!                         dxt, dyt, xyav, &
+!                         dxT, dyT, xyav, &
 !                         xxxav, xxyav, xyyav, yyyav
 
       integer (kind=int_kind) :: &
@@ -276,9 +276,9 @@ subroutine init_remap
             xav(i,j,iblk) = c0
             yav(i,j,iblk) = c0
 !!! These formulas would be used on a rectangular grid
-!!! with dimensions (dxt, dyt):
-!!!            xxav(i,j,iblk) = dxt(i,j,iblk)**2 / c12
-!!!            yyav(i,j,iblk) = dyt(i,j,iblk)**2 / c12
+!!! with dimensions (dxT, dyT):
+!!!            xxav(i,j,iblk) = dxT(i,j,iblk)**2 / c12
+!!!            yyav(i,j,iblk) = dyT(i,j,iblk)**2 / c12
             xxav(i,j,iblk) = c1/c12
             yyav(i,j,iblk) = c1/c12
 !            xyav(i,j,iblk) = c0
@@ -1203,7 +1203,7 @@ subroutine construct_fields (nx_block, ny_block,   &
 
     ! limited gradient of mass field in each cell (except masked cells)
     ! Note: The gradient is computed in scaled coordinates with
-    !       dxt = dyt = hte = htn = 1.
+    !       dxT = dyT = hte = htn = 1.
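! A quick numerical check of the c1/c12 moments set in init_remap above:
! in the scaled coordinates just described (dxT = dyT = hte = htn = 1),
! 1/12 is the second moment of a unit cell, i.e. the integral of x**2
! over [-1/2,1/2]. The standalone toy program below is not part of the
! patch and all names in it are illustrative; it approximates that
! integral with a midpoint-rule sum and recovers ~0.08333:
!
!     program second_moment
!        implicit none
!        integer, parameter :: n = 1000   ! sub-intervals across the unit cell
!        integer :: k
!        double precision :: x, xxav
!        xxav = 0.d0
!        do k = 1, n
!           x = (dble(k) - 0.5d0)/dble(n) - 0.5d0   ! midpoint in [-1/2,1/2]
!           xxav = xxav + x*x/dble(n)               ! midpoint-rule quadrature
!        enddo
!        print *, 'xxav =', xxav, '  1/12 =', 1.d0/12.d0
!     end program second_moment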
call limited_gradient (nx_block, ny_block, & ilo, ihi, jlo, jhi, & diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index f5289c922..312891f95 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -56,8 +56,8 @@ module ice_flux ! out to ocean T-cell (kg/m s^2) ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling - strocnxT, & ! ice-ocean stress, x-direction at T points, mapped from strocnx, per ice fraction - strocnyT ! ice-ocean stress, y-direction at T points, mapped from strocny, per ice fraction + strocnxT, & ! ice-ocean stress, x-direction at T points, per ice fraction + strocnyT ! ice-ocean stress, y-direction at T points, per ice fraction ! diagnostic @@ -67,8 +67,8 @@ module ice_flux sigP , & ! internal ice pressure (N/m) taubx , & ! seabed stress (x) (N/m^2) tauby , & ! seabed stress (y) (N/m^2) - strairx , & ! stress on ice by air, x-direction at U points, mapped from strairxT - strairy , & ! stress on ice by air, y-direction at U points, mapped from strairyT + strairx , & ! stress on ice by air, x-direction at U points + strairy , & ! stress on ice by air, y-direction at U points strocnx , & ! ice-ocean stress, x-direction at U points, computed in dyn_finish strocny , & ! ice-ocean stress, y-direction at U points, computed in dyn_finish strtltx , & ! stress due to sea surface slope, x-direction @@ -77,8 +77,8 @@ module ice_flux strinty , & ! divergence of internal ice stress, y (N/m^2) taubxN , & ! seabed stress (x) at N points (N/m^2) taubyN , & ! seabed stress (y) at N points (N/m^2) - strairxN, & ! stress on ice by air, x-direction at N points, mapped from strairxT - strairyN, & ! stress on ice by air, y-direction at N points, mapped from strairyT + strairxN, & ! stress on ice by air, x-direction at N points + strairyN, & ! stress on ice by air, y-direction at N points strocnxN, & ! ice-ocean stress, x-direction at N points, computed in dyn_finish strocnyN, & ! ice-ocean stress, y-direction at N points, computed in dyn_finish strtltxN, & ! stress due to sea surface slope, x-direction at N points @@ -87,8 +87,8 @@ module ice_flux strintyN, & ! divergence of internal ice stress, y at N points (N/m^2) taubxE , & ! seabed stress (x) at E points (N/m^2) taubyE , & ! seabed stress (y) at E points (N/m^2) - strairxE, & ! stress on ice by air, x-direction at E points, mapped from strairxT - strairyE, & ! stress on ice by air, y-direction at E points, mapped from strairyT + strairxE, & ! stress on ice by air, x-direction at E points + strairyE, & ! stress on ice by air, y-direction at E points strocnxE, & ! ice-ocean stress, x-direction at E points, computed in dyn_finish strocnyE, & ! ice-ocean stress, y-direction at E points, computed in dyn_finish strtltxE, & ! stress due to sea surface slope, x-direction at E points @@ -364,8 +364,8 @@ module ice_flux !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - uatmT , & ! uatm mapped to T grid (m/s) - vatmT , & ! vatm mapped to T grid (m/s) + uatmT , & ! uatm on T grid (m/s) + vatmT , & ! vatm on T grid (m/s) rside , & ! fraction of ice that melts laterally fside , & ! lateral heat flux (W/m^2) fsw , & ! incoming shortwave radiation (W/m^2) @@ -539,8 +539,8 @@ subroutine alloc_flux fswthru_ai (nx_block,ny_block,max_blocks), & ! shortwave penetrating to ocean (W/m^2) fresh_da (nx_block,ny_block,max_blocks), & ! 
fresh water flux to ocean due to data assim (kg/m^2/s) fsalt_da (nx_block,ny_block,max_blocks), & ! salt flux to ocean due to data assimilation(kg/m^2/s) - uatmT (nx_block,ny_block,max_blocks), & ! uatm mapped to T grid - vatmT (nx_block,ny_block,max_blocks), & ! vatm mapped to T grid + uatmT (nx_block,ny_block,max_blocks), & ! uatm on T grid + vatmT (nx_block,ny_block,max_blocks), & ! vatm on T grid rside (nx_block,ny_block,max_blocks), & ! fraction of ice that melts laterally fside (nx_block,ny_block,max_blocks), & ! lateral melt rate (W/m^2) fsw (nx_block,ny_block,max_blocks), & ! incoming shortwave radiation (W/m^2) diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index 2d127ecb2..36dbfe88c 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -126,8 +126,8 @@ module ice_forcing ocn_data_type, & ! 'default', 'clim', 'ncar', 'oned', 'calm', 'box2001' ! 'hadgem_sst' or 'hadgem_sst_uvocn', 'uniform' ice_data_type, & ! 'latsst', 'box2001', 'boxslotcyl', etc - ice_data_conc, & ! 'p5','p8','p9','c1','parabolic' - ice_data_dist, & ! 'box2001','gauss', 'uniform' + ice_data_conc, & ! 'p5','p8','p9','c1','parabolic', 'box2001', etc + ice_data_dist, & ! 'box2001','gauss', 'uniform', etc precip_units ! 'mm_per_month', 'mm_per_sec', 'mks','m_per_sec' logical (kind=log_kind), public :: & @@ -5311,7 +5311,7 @@ end subroutine ocn_data_ispol_init ! subroutine box2001_data_atm -! wind and current fields as in Hunke, JCP 2001 +! wind fields as in Hunke, JCP 2001 ! these are defined at the u point ! authors: Elizabeth Hunke, LANL @@ -5354,17 +5354,6 @@ subroutine box2001_data_atm iglob = this_block%i_glob jglob = this_block%j_glob -!tcraig, move to box2001_data_ocn -! ! ocean current -! ! constant in time, could be initialized in ice_flux.F90 -! uocn(i,j,iblk) = p2*real(j-nghost, kind=dbl_kind) & -! / real(nx_global,kind=dbl_kind) - p1 -! vocn(i,j,iblk) = -p2*real(i-nghost, kind=dbl_kind) & -! / real(ny_global,kind=dbl_kind) + p1 -! -! uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) -! vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) - ! wind components uatm(i,j,iblk) = c5 + (sin(pi2*timesecs/period)-c3) & * sin(pi2*real(iglob(i), kind=dbl_kind) & @@ -5417,7 +5406,7 @@ end subroutine box2001_data_atm ! subroutine box2001_data_ocn -! wind and current fields as in Hunke, JCP 2001 +! current fields as in Hunke, JCP 2001 ! these are defined at the u point ! authors: Elizabeth Hunke, LANL @@ -5546,7 +5535,7 @@ end subroutine uniform_data_atm ! subroutine uniform_data_ocn(dir,spd) -! uniform wind fields in some direction +! 
uniform current fields in some direction use ice_domain, only: nblocks use ice_domain_size, only: max_blocks diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 6276770bc..9b6bf673c 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -148,10 +148,11 @@ subroutine input_data kitd, kcatbound, ktransport character (len=char_len) :: shortwave, albedo_type, conduct, fbot_xfer_type, & - tfrz_option, frzpnd, atmbndy, wave_spec_type, snwredist, snw_aging_table + tfrz_option, frzpnd, atmbndy, wave_spec_type, snwredist, snw_aging_table, & + capping_method logical (kind=log_kind) :: calc_Tsfc, formdrag, highfreq, calc_strair, wave_spec, & - sw_redist, calc_dragio, use_smliq_pnd, snwgrain + sw_redist, calc_dragio, use_smliq_pnd, snwgrain logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond logical (kind=log_kind) :: tr_iso, tr_aero, tr_fsd, tr_snow @@ -229,7 +230,7 @@ subroutine input_data damping_andacc, start_andacc, fpfunc_andacc, use_mean_vrel, & ortho_type, seabed_stress, seabed_stress_method, & k1, k2, alphab, threshold_hw, & - deltaminEVP, deltaminVP, capping, & + deltaminEVP, deltaminVP, capping_method, & Cf, Pstar, Cstar, Ktens namelist /shortwave_nml/ & @@ -326,22 +327,22 @@ subroutine input_data dumpfreq_n = 1 ! restart frequency dumpfreq_base = 'init' ! restart frequency reference date dump_last = .false. ! write restart on last time step - restart_dir = './' ! write to executable dir for default + restart_dir = './' ! write to executable dir for default restart_file = 'iced' ! restart file name prefix restart_ext = .false. ! if true, read/write ghost cells - restart_coszen = .false. ! if true, read/write coszen + restart_coszen = .false. ! if true, read/write coszen pointer_file = 'ice.restart_file' restart_format = 'default' ! restart file format - lcdf64 = .false. ! 64 bit offset for netCDF - ice_ic = 'default' ! latitude and sst-dependent - grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) - grid_type = 'rectangular' ! define rectangular grid internally + lcdf64 = .false. ! 64 bit offset for netCDF + ice_ic = 'default' ! latitude and sst-dependent + grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) + grid_type = 'rectangular'! define rectangular grid internally grid_file = 'unknown_grid_file' - grid_ice = 'B' ! underlying grid system - grid_atm = 'A' ! underlying atm forcing/coupling grid - grid_ocn = 'A' ! underlying atm forcing/coupling grid + grid_ice = 'B' ! underlying grid system + grid_atm = 'A' ! underlying atm forcing/coupling grid + grid_ocn = 'A' ! underlying atm forcing/coupling grid gridcpl_file = 'unknown_gridcpl_file' - orca_halogrid = .false. ! orca haloed grid + orca_halogrid = .false. ! orca haloed grid bathymetry_file = 'unknown_bathymetry_file' bathymetry_format = 'default' use_bathymetry = .false. @@ -359,76 +360,76 @@ subroutine input_data kdyn = 1 ! type of dynamics (-1, 0 = off, 1 = evp, 2 = eap, 3 = vp) ndtd = 1 ! dynamic time steps per thermodynamic time step ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte - evp_algorithm = 'standard_2d' ! EVP kernel (=standard_2d: standard cice evp; =shared_mem_1d: 1d shared memory and no mpi. if more mpi processors then executed on master - elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E - pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) + evp_algorithm = 'standard_2d' ! 
EVP kernel (=standard_2d: standard cice evp; =shared_mem_1d: 1d shared memory and no mpi. if more mpi processors then executed on master + elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E + pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared arlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared - revised_evp = .false. ! if true, use revised procedure for evp dynamics - yield_curve = 'ellipse' ! yield curve - kstrength = 1 ! 1 = Rothrock 75 strength, 0 = Hibler 79 + revised_evp = .false. ! if true, use revised procedure for evp dynamics + yield_curve = 'ellipse' ! yield curve + kstrength = 1 ! 1 = Rothrock 75 strength, 0 = Hibler 79 Pstar = 2.75e4_dbl_kind ! constant in Hibler strength formula (kstrength = 0) Cstar = 20._dbl_kind ! constant in Hibler strength formula (kstrength = 0) - krdg_partic = 1 ! 1 = new participation, 0 = Thorndike et al 75 - krdg_redist = 1 ! 1 = new redistribution, 0 = Hibler 80 - mu_rdg = 3 ! e-folding scale of ridged ice, krdg_partic=1 (m^0.5) - Cf = 17.0_dbl_kind ! ratio of ridging work to PE change in ridging - ksno = 0.3_dbl_kind ! snow thermal conductivity - dxrect = 0.0_dbl_kind ! user defined grid spacing in cm in x direction - dyrect = 0.0_dbl_kind ! user defined grid spacing in cm in y direction + krdg_partic = 1 ! 1 = new participation, 0 = Thorndike et al 75 + krdg_redist = 1 ! 1 = new redistribution, 0 = Hibler 80 + mu_rdg = 3 ! e-folding scale of ridged ice, krdg_partic=1 (m^0.5) + Cf = 17.0_dbl_kind ! ratio of ridging work to PE change in ridging + ksno = 0.3_dbl_kind ! snow thermal conductivity + dxrect = 0.0_dbl_kind ! user defined grid spacing in cm in x direction + dyrect = 0.0_dbl_kind ! user defined grid spacing in cm in y direction close_boundaries = .false. ! true = set land on edges of grid - seabed_stress= .false. ! if true, seabed stress for landfast is on - seabed_stress_method = 'LKD' ! LKD = Lemieux et al 2015, probabilistic = Dupont et al. in prep - k1 = 7.5_dbl_kind ! 1st free parameter for landfast parameterization - k2 = 15.0_dbl_kind ! 2nd free parameter (N/m^3) for landfast parametrization - alphab = 20.0_dbl_kind ! alphab=Cb factor in Lemieux et al 2015 + seabed_stress= .false. ! if true, seabed stress for landfast is on + seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. in prep + k1 = 7.5_dbl_kind ! 1st free parameter for landfast parameterization + k2 = 15.0_dbl_kind ! 2nd free parameter (N/m^3) for landfast parametrization + alphab = 20.0_dbl_kind ! alphab=Cb factor in Lemieux et al 2015 threshold_hw = 30.0_dbl_kind ! max water depth for grounding - Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) - e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve - e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential + Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) + e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve + e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential visc_method = 'avg_strength' ! calc viscosities at U point: avg_strength, avg_zeta deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) - capping = 1.0_dbl_kind ! 
method for capping of viscosities (1=Hibler 1979,0=Kreyscher2000) - maxits_nonlin = 4 ! max nb of iteration for nonlinear solver - precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) - dim_fgmres = 50 ! size of fgmres Krylov subspace - dim_pgmres = 5 ! size of pgmres Krylov subspace - maxits_fgmres = 50 ! max nb of iteration for fgmres - maxits_pgmres = 5 ! max nb of iteration for pgmres + capping_method = 'max' ! method for capping of viscosities (max=Hibler 1979,sum=Kreyscher2000) + maxits_nonlin = 4 ! max nb of iteration for nonlinear solver + precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) + dim_fgmres = 50 ! size of fgmres Krylov subspace + dim_pgmres = 5 ! size of pgmres Krylov subspace + maxits_fgmres = 50 ! max nb of iteration for fgmres + maxits_pgmres = 5 ! max nb of iteration for pgmres monitor_nonlin = .false. ! print nonlinear residual norm monitor_fgmres = .false. ! print fgmres residual norm monitor_pgmres = .false. ! print pgmres residual norm - ortho_type = 'mgs' ! orthogonalization procedure 'cgs' or 'mgs' + ortho_type = 'mgs' ! orthogonalization procedure 'cgs' or 'mgs' reltol_nonlin = 1e-8_dbl_kind ! nonlinear stopping criterion: reltol_nonlin*res(k=0) reltol_fgmres = 1e-2_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) reltol_pgmres = 1e-6_dbl_kind ! pgmres stopping criterion: reltol_pgmres*res(k) algo_nonlin = 'picard' ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) - fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) - dim_andacc = 5 ! size of Anderson minimization matrix (number of saved previous residuals) + fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) + dim_andacc = 5 ! size of Anderson minimization matrix (number of saved previous residuals) reltol_andacc = 1e-6_dbl_kind ! relative tolerance for Anderson acceleration - damping_andacc = 0 ! damping factor for Anderson acceleration - start_andacc = 0 ! acceleration delay factor (acceleration starts at this iteration) - use_mean_vrel = .true. ! use mean of previous 2 iterates to compute vrel - advection = 'remap' ! incremental remapping transport scheme - conserv_check = .false.! tracer conservation check - shortwave = 'ccsm3' ! 'ccsm3' or 'dEdd' (delta-Eddington) - albedo_type = 'ccsm3' ! 'ccsm3' or 'constant' - ktherm = 1 ! -1 = OFF, 0 = 0-layer, 1 = BL99, 2 = mushy thermo - conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) - coriolis = 'latitude' ! latitude dependent, or 'constant' + damping_andacc = 0 ! damping factor for Anderson acceleration + start_andacc = 0 ! acceleration delay factor (acceleration starts at this iteration) + use_mean_vrel = .true. ! use mean of previous 2 iterates to compute vrel + advection = 'remap' ! incremental remapping transport scheme + conserv_check = .false. ! tracer conservation check + shortwave = 'ccsm3' ! 'ccsm3' or 'dEdd' (delta-Eddington) + albedo_type = 'ccsm3' ! 'ccsm3' or 'constant' + ktherm = 1 ! -1 = OFF, 0 = 0-layer, 1 = BL99, 2 = mushy thermo + conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) + coriolis = 'latitude' ! latitude dependent, or 'constant' ssh_stress = 'geostrophic' ! 'geostrophic' or 'coupled' - kridge = 1 ! -1 = off, 1 = on - ktransport = 1 ! -1 = off, 1 = on - calc_Tsfc = .true. ! 
calculate surface temperature - update_ocn_f = .false. ! include fresh water and salt fluxes for frazil - ustar_min = 0.005 ! minimum friction velocity for ocean heat flux (m/s) + kridge = 1 ! -1 = off, 1 = on + ktransport = 1 ! -1 = off, 1 = on + calc_Tsfc = .true. ! calculate surface temperature + update_ocn_f = .false. ! include fresh water and salt fluxes for frazil + ustar_min = 0.005 ! minimum friction velocity for ocean heat flux (m/s) iceruf = 0.0005_dbl_kind ! ice surface roughness at atmosphere interface (m) iceruf_ocn = 0.03_dbl_kind ! under-ice roughness (m) - calc_dragio = .false. ! compute dragio from iceruf_ocn and thickness of first ocean level - emissivity = 0.985 ! emissivity of snow and ice - l_mpond_fresh = .false. ! logical switch for including meltpond freshwater - ! flux feedback to ocean model + calc_dragio = .false. ! compute dragio from iceruf_ocn and thickness of first ocean level + emissivity = 0.985 ! emissivity of snow and ice + l_mpond_fresh = .false. ! logical switch for including meltpond freshwater + ! flux feedback to ocean model fbot_xfer_type = 'constant' ! transfer coefficient type for ocn heat flux R_ice = 0.00_dbl_kind ! tuning parameter for sea ice R_pnd = 0.00_dbl_kind ! tuning parameter for ponded sea ice @@ -513,7 +514,7 @@ subroutine input_data #ifndef CESMCOUPLED runid = 'unknown' ! run ID used in CESM and for machine 'bering' runtype = 'initial' ! run type: 'initial', 'continue' - restart = .false. ! if true, read ice state from restart file + restart = .false. ! if true, read ice state from restart file use_restart_time = .false. ! if true, use time info written in file #endif @@ -868,7 +869,7 @@ subroutine input_data call broadcast_scalar(visc_method, master_task) call broadcast_scalar(deltaminEVP, master_task) call broadcast_scalar(deltaminVP, master_task) - call broadcast_scalar(capping, master_task) + call broadcast_scalar(capping_method, master_task) call broadcast_scalar(advection, master_task) call broadcast_scalar(conserv_check, master_task) call broadcast_scalar(shortwave, master_task) @@ -1187,14 +1188,19 @@ subroutine input_data endif endif + capping = -9.99e30 if (kdyn == 1 .or. kdyn == 3) then - if (capping /= c0 .and. 
capping /= c1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: invalid method for capping viscosities' - write(nu_diag,*) subname//' ERROR: capping should be equal to 0.0 or 1.0' + if (capping_method == 'max') then + capping = c1 + elseif (capping_method == 'sum') then + capping = c0 + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: invalid method for capping viscosities' + write(nu_diag,*) subname//' ERROR: capping_method should be equal to max or sum' + endif + abort_list = trim(abort_list)//":45" endif - abort_list = trim(abort_list)//":45" - endif endif rpcesm = 0 @@ -1714,12 +1720,13 @@ subroutine input_data if (kdyn == 1) then write(nu_diag,1003) ' deltamin = ', deltaminEVP, ' : minimum delta for viscosities' - write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscosities' + write(nu_diag,1030) ' capping_meth = ', trim(capping_method), ' : capping method for viscosities' elseif (kdyn == 3) then write(nu_diag,1003) ' deltamin = ', deltaminVP, ' : minimum delta for viscosities' - write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscosities' + write(nu_diag,1030) ' capping_meth = ', trim(capping_method), ' : capping method for viscosities' endif - + !write(nu_diag,1002) ' capping = ', capping, ' : capping value for viscosities' + write(nu_diag,1002) ' elasticDamp = ', elasticDamp, ' : coefficient for calculating the parameter E' if (trim(coriolis) == 'latitude') then @@ -2902,7 +2909,8 @@ subroutine set_state_var (nx_block, ny_block, & if (trim(ice_data_conc) == 'p5' .or. & trim(ice_data_conc) == 'p8' .or. & trim(ice_data_conc) == 'p9' .or. & - trim(ice_data_conc) == 'c1') then + trim(ice_data_conc) == 'c1' .or. & + trim(ice_data_conc) == 'box2001') then if (trim(ice_data_conc) == 'p5') then hbar = c2 ! initial ice thickness @@ -2916,6 +2924,9 @@ subroutine set_state_var (nx_block, ny_block, & elseif (trim(ice_data_conc) == 'c1') then hbar = c1 ! initial ice thickness abar = c1 ! initial ice concentration + elseif (trim(ice_data_conc) == 'box2001') then + hbar = c2 ! initial ice thickness + abar = p5 ! initial ice concentration endif do n = 1, ncat @@ -3034,20 +3045,6 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - elseif (trim(ice_data_type) == 'smallblock') then - ! 2x2 ice in center of domain - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if ((iglob(i) == nx_global/2 .or. iglob(i) == nx_global/2+1) .and. & - (jglob(j) == ny_global/2 .or. jglob(j) == ny_global/2+1)) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - enddo - enddo - elseif (trim(ice_data_type) == 'block') then ! ice in 50% of domain, not at edges icells = 0 @@ -3064,35 +3061,6 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - elseif (trim(ice_data_type) == 'bigblock') then - ! ice in 90% of domain, not at edges - icells = 0 - iedge = int(real(nx_global,kind=dbl_kind) * 0.05) + 1 - jedge = int(real(ny_global,kind=dbl_kind) * 0.05) + 1 - do j = jlo, jhi - do i = ilo, ihi - if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & - (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - enddo - enddo - - elseif (trim(ice_data_type) == 'easthalf') then - ! 
block on east half of domain - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (iglob(i) >= nx_global/2) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - enddo - enddo - elseif (trim(ice_data_type) == 'eastblock') then ! block on east half of domain in center of domain icells = 0 @@ -3154,13 +3122,13 @@ subroutine set_state_var (nx_block, ny_block, & if (trim(ice_data_dist) == 'box2001') then if (hinit(n) > c0) then -! ! constant slope from 0 to 1 in x direction +! ! varies linearly from 0 to 1 in x direction aicen(i,j,n) = (real(iglob(i), kind=dbl_kind)-p5) & / (real(nx_global,kind=dbl_kind)) ! ! constant slope from 0 to 0.5 in x direction ! aicen(i,j,n) = (real(iglob(i), kind=dbl_kind)-p5) & ! / (real(nx_global,kind=dbl_kind)) * p5 - ! quadratic +! ! quadratic ! aicen(i,j,n) = max(c0,(real(iglob(i), kind=dbl_kind)-p5) & ! / (real(nx_global,kind=dbl_kind)) & ! * (real(jglob(j), kind=dbl_kind)-p5) & diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 1892a396e..0509f7623 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -15,11 +15,10 @@ ! 2007: Option to read from netcdf files (A. Keen, Met Office) ! Grid reading routines reworked by E. Hunke for boundary values ! 2021: Add N (center of north face) and E (center of east face) grids -! to support CD solvers. Defining T at center of cells, U at +! to support C and CD solvers. Defining T at center of cells, U at ! NE corner, N at center of top face, E at center of right face. ! All cells are quadrilaterals with NE, E, and N associated with -! directions relative to logical grid. E is increasing i (x) and -! N is increasing j (y) direction. +! directions relative to logical grid. module ice_grid @@ -73,14 +72,14 @@ module ice_grid ! displaced_pole, tripole, regional real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - dxt , & ! width of T-cell through the middle (m) - dyt , & ! height of T-cell through the middle (m) - dxu , & ! width of U-cell through the middle (m) - dyu , & ! height of U-cell through the middle (m) - dxn , & ! width of N-cell through the middle (m) - dyn , & ! height of N-cell through the middle (m) - dxe , & ! width of E-cell through the middle (m) - dye , & ! height of E-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) + dxU , & ! width of U-cell through the middle (m) + dyU , & ! height of U-cell through the middle (m) + dxN , & ! width of N-cell through the middle (m) + dyN , & ! height of N-cell through the middle (m) + dxE , & ! width of E-cell through the middle (m) + dyE , & ! height of E-cell through the middle (m) HTE , & ! length of eastern edge of T-cell (m) HTN , & ! length of northern edge of T-cell (m) tarea , & ! area of T-cell (m^2) @@ -120,8 +119,8 @@ module ice_grid dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - ratiodxN , & ! - dxn(i+1,j) / dxn(i,j) - ratiodyE , & ! - dye(i ,j+1) / dye(i,j) + ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) + ratiodyE , & ! - dyE(i ,j+1) / dyE(i,j) ratiodxNr , & ! 1 / ratiodxN ratiodyEr ! 1 / ratiodyE @@ -211,14 +210,14 @@ subroutine alloc_grid character(len=*), parameter :: subname = '(alloc_grid)' allocate( & - dxt (nx_block,ny_block,max_blocks), & ! width of T-cell through the middle (m) - dyt (nx_block,ny_block,max_blocks), & ! 
height of T-cell through the middle (m) - dxu (nx_block,ny_block,max_blocks), & ! width of U-cell through the middle (m) - dyu (nx_block,ny_block,max_blocks), & ! height of U-cell through the middle (m) - dxn (nx_block,ny_block,max_blocks), & ! width of N-cell through the middle (m) - dyn (nx_block,ny_block,max_blocks), & ! height of N-cell through the middle (m) - dxe (nx_block,ny_block,max_blocks), & ! width of E-cell through the middle (m) - dye (nx_block,ny_block,max_blocks), & ! height of E-cell through the middle (m) + dxT (nx_block,ny_block,max_blocks), & ! width of T-cell through the middle (m) + dyT (nx_block,ny_block,max_blocks), & ! height of T-cell through the middle (m) + dxU (nx_block,ny_block,max_blocks), & ! width of U-cell through the middle (m) + dyU (nx_block,ny_block,max_blocks), & ! height of U-cell through the middle (m) + dxN (nx_block,ny_block,max_blocks), & ! width of N-cell through the middle (m) + dyN (nx_block,ny_block,max_blocks), & ! height of N-cell through the middle (m) + dxE (nx_block,ny_block,max_blocks), & ! width of E-cell through the middle (m) + dyE (nx_block,ny_block,max_blocks), & ! height of E-cell through the middle (m) HTE (nx_block,ny_block,max_blocks), & ! length of eastern edge of T-cell (m) HTN (nx_block,ny_block,max_blocks), & ! length of northern edge of T-cell (m) tarea (nx_block,ny_block,max_blocks), & ! area of T-cell (m^2) @@ -534,10 +533,10 @@ subroutine init_grid2 do j = 1,ny_block do i = 1,nx_block - tarea(i,j,iblk) = dxt(i,j,iblk)*dyt(i,j,iblk) - uarea(i,j,iblk) = dxu(i,j,iblk)*dyu(i,j,iblk) - narea(i,j,iblk) = dxn(i,j,iblk)*dyn(i,j,iblk) - earea(i,j,iblk) = dxe(i,j,iblk)*dye(i,j,iblk) + tarea(i,j,iblk) = dxT(i,j,iblk)*dyT(i,j,iblk) + uarea(i,j,iblk) = dxU(i,j,iblk)*dyU(i,j,iblk) + narea(i,j,iblk) = dxN(i,j,iblk)*dyN(i,j,iblk) + earea(i,j,iblk) = dxE(i,j,iblk)*dyE(i,j,iblk) if (tarea(i,j,iblk) > c0) then tarear(i,j,iblk) = c1/tarea(i,j,iblk) @@ -583,10 +582,10 @@ subroutine init_grid2 if (grid_ice == 'CD' .or. grid_ice == 'C') then do j = jlo, jhi do i = ilo, ihi - ratiodxN (i,j,iblk) = - dxn(i+1,j ,iblk) / dxn(i,j,iblk) - ratiodyE (i,j,iblk) = - dye(i ,j+1,iblk) / dye(i,j,iblk) - ratiodxNr(i,j,iblk) = c1 / ratiodxn(i,j,iblk) - ratiodyEr(i,j,iblk) = c1 / ratiodye(i,j,iblk) + ratiodxN (i,j,iblk) = - dxN(i+1,j ,iblk) / dxN(i,j,iblk) + ratiodyE (i,j,iblk) = - dyE(i ,j+1,iblk) / dyE(i,j,iblk) + ratiodxNr(i,j,iblk) = c1 / ratiodxN(i,j,iblk) + ratiodyEr(i,j,iblk) = c1 / ratiodyE(i,j,iblk) enddo enddo endif @@ -840,10 +839,10 @@ subroutine popgrid !----------------------------------------------------------------- call ice_read_global(nu_grid,3,work_g1,'rda8',.true.) ! HTN - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE call ice_read_global(nu_grid,4,work_g1,'rda8',.true.) ! HTE - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE deallocate(work_g1) @@ -1017,10 +1016,10 @@ subroutine popgrid_nc fieldname='htn' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! HTN - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE fieldname='hte' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! HTE - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye + call primary_grid_lengths_HTE(work_g1) ! 
dyU, dyT, dyN, dyE deallocate(work_g1) @@ -1294,14 +1293,14 @@ subroutine latlongrid ANGLET(i,j,iblk) = c0 HTN (i,j,iblk) = 1.e36_dbl_kind HTE (i,j,iblk) = 1.e36_dbl_kind - dxt (i,j,iblk) = 1.e36_dbl_kind - dyt (i,j,iblk) = 1.e36_dbl_kind - dxu (i,j,iblk) = 1.e36_dbl_kind - dyu (i,j,iblk) = 1.e36_dbl_kind - dxn (i,j,iblk) = 1.e36_dbl_kind - dyn (i,j,iblk) = 1.e36_dbl_kind - dxe (i,j,iblk) = 1.e36_dbl_kind - dye (i,j,iblk) = 1.e36_dbl_kind + dxT (i,j,iblk) = 1.e36_dbl_kind + dyT (i,j,iblk) = 1.e36_dbl_kind + dxU (i,j,iblk) = 1.e36_dbl_kind + dyU (i,j,iblk) = 1.e36_dbl_kind + dxN (i,j,iblk) = 1.e36_dbl_kind + dyN (i,j,iblk) = 1.e36_dbl_kind + dxE (i,j,iblk) = 1.e36_dbl_kind + dyE (i,j,iblk) = 1.e36_dbl_kind dxhy (i,j,iblk) = 1.e36_dbl_kind dyhx (i,j,iblk) = 1.e36_dbl_kind cyp (i,j,iblk) = 1.e36_dbl_kind @@ -1413,7 +1412,7 @@ subroutine rectgrid enddo enddo endif - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE if (my_task == master_task) then do j = 1, ny_global @@ -1422,7 +1421,7 @@ subroutine rectgrid enddo enddo endif - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE !----------------------------------------------------------------- ! Construct T-cell land mask @@ -1717,11 +1716,11 @@ subroutine cpomgrid call ice_read_global(nu_grid,3,work_g1, 'rda8',diag) work_g1 = work_g1 * m_to_cm - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE call ice_read_global(nu_grid,4,work_g1, 'rda8',diag) work_g1 = work_g1 * m_to_cm - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE call ice_read_global(nu_grid,7,work_g1,'rda8',diag) call scatter_global(ANGLE, work_g1, master_task, distrb_info, & @@ -1745,9 +1744,9 @@ end subroutine cpomgrid !======================================================================= -! Calculate dxu and dxt from HTN on the global grid, to preserve +! Calculate dxU and dxT from HTN on the global grid, to preserve ! ghost cell and/or land values that might otherwise be lost. Scatter -! dxu, dxt and HTN to all processors. +! dxU, dxT and HTN to all processors. ! ! author: Elizabeth C. Hunke, LANL @@ -1776,7 +1775,7 @@ subroutine primary_grid_lengths_HTN(work_g) allocate(work_g2(1,1)) endif - ! HTN, dxu = average of 2 neighbor HTNs in i + ! HTN, dxU = average of 2 neighbor HTNs in i if (my_task == master_task) then do j = 1, ny_global @@ -1789,7 +1788,7 @@ subroutine primary_grid_lengths_HTN(work_g) ! assume cyclic; noncyclic will be handled during scatter ip1 = i+1 if (i == nx_global) ip1 = 1 - work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxu + work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxU enddo enddo endif @@ -1799,30 +1798,30 @@ subroutine primary_grid_lengths_HTN(work_g) endif call scatter_global(HTN, work_g, master_task, distrb_info, & field_loc_Nface, field_type_scalar) - call scatter_global(dxu, work_g2, master_task, distrb_info, & + call scatter_global(dxU, work_g2, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) - ! dxt = average of 2 neighbor HTNs in j + ! dxT = average of 2 neighbor HTNs in j if (my_task == master_task) then do j = 2, ny_global do i = 1, nx_global - work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j-1)) ! dxt + work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j-1)) ! dxT enddo enddo - ! extrapolate to obtain dxt along j=1 + ! 
extrapolate to obtain dxT along j=1 do i = 1, nx_global - work_g2(i,1) = c2*work_g(i,2) - work_g(i,3) ! dxt + work_g2(i,1) = c2*work_g(i,2) - work_g(i,3) ! dxT enddo endif - call scatter_global(dxt, work_g2, master_task, distrb_info, & + call scatter_global(dxT, work_g2, master_task, distrb_info, & field_loc_center, field_type_scalar) - ! dxn = HTN + ! dxN = HTN - dxn(:,:,:) = HTN(:,:,:) ! dxn + dxN(:,:,:) = HTN(:,:,:) ! dxN - ! dxe = average of 4 surrounding HTNs + ! dxE = average of 4 surrounding HTNs if (my_task == master_task) then do j = 2, ny_global @@ -1830,19 +1829,19 @@ subroutine primary_grid_lengths_HTN(work_g) ! assume cyclic; noncyclic will be handled during scatter ip1 = i+1 if (i == nx_global) ip1 = 1 - work_g2(i,j) = p25*(work_g(i,j)+work_g(ip1,j)+work_g(i,j-1)+work_g(ip1,j-1)) ! dxe + work_g2(i,j) = p25*(work_g(i,j)+work_g(ip1,j)+work_g(i,j-1)+work_g(ip1,j-1)) ! dxE enddo enddo - ! extrapolate to obtain dxt along j=1 + ! extrapolate to obtain dxT along j=1 do i = 1, nx_global ! assume cyclic; noncyclic will be handled during scatter ip1 = i+1 if (i == nx_global) ip1 = 1 work_g2(i,1) = p5*(c2*work_g(i ,2) - work_g(i ,3) + & - c2*work_g(ip1,2) - work_g(ip1,3)) ! dxe + c2*work_g(ip1,2) - work_g(ip1,3)) ! dxE enddo endif - call scatter_global(dxe, work_g2, master_task, distrb_info, & + call scatter_global(dxE, work_g2, master_task, distrb_info, & field_loc_center, field_type_scalar) deallocate(work_g2) @@ -1850,9 +1849,9 @@ subroutine primary_grid_lengths_HTN(work_g) end subroutine primary_grid_lengths_HTN !======================================================================= -! Calculate dyu and dyt from HTE on the global grid, to preserve +! Calculate dyU and dyT from HTE on the global grid, to preserve ! ghost cell and/or land values that might otherwise be lost. Scatter -! dyu, dyt and HTE to all processors. +! dyU, dyT and HTE to all processors. ! ! author: Elizabeth C. Hunke, LANL @@ -1881,7 +1880,7 @@ subroutine primary_grid_lengths_HTE(work_g) allocate(work_g2(1,1)) endif - ! HTE, dyu = average of 2 neighbor HTE in j + ! HTE, dyU = average of 2 neighbor HTE in j if (my_task == master_task) then do j = 1, ny_global @@ -1891,13 +1890,13 @@ subroutine primary_grid_lengths_HTE(work_g) enddo do j = 1, ny_global-1 do i = 1, nx_global - work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j+1)) ! dyu + work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j+1)) ! dyU enddo enddo - ! extrapolate to obtain dyu along j=ny_global + ! extrapolate to obtain dyU along j=ny_global if (ny_global > 1) then do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g(i,ny_global-1) - work_g(i,ny_global-2) ! dyu + work_g2(i,ny_global) = c2*work_g(i,ny_global-1) - work_g(i,ny_global-2) ! dyU enddo endif endif @@ -1907,10 +1906,10 @@ subroutine primary_grid_lengths_HTE(work_g) endif call scatter_global(HTE, work_g, master_task, distrb_info, & field_loc_Eface, field_type_scalar) - call scatter_global(dyu, work_g2, master_task, distrb_info, & + call scatter_global(dyU, work_g2, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) - ! dyt = average of 2 neighbor HTE in i + ! dyT = average of 2 neighbor HTE in i if (my_task == master_task) then do j = 1, ny_global @@ -1918,14 +1917,14 @@ subroutine primary_grid_lengths_HTE(work_g) ! assume cyclic; noncyclic will be handled during scatter im1 = i-1 if (i == 1) im1 = nx_global - work_g2(i,j) = p5*(work_g(i,j) + work_g(im1,j)) ! dyt + work_g2(i,j) = p5*(work_g(i,j) + work_g(im1,j)) ! 
dyT enddo enddo endif - call scatter_global(dyt, work_g2, master_task, distrb_info, & + call scatter_global(dyT, work_g2, master_task, distrb_info, & field_loc_center, field_type_scalar) - ! dyn = average of 4 neighbor HTEs + ! dyN = average of 4 neighbor HTEs if (my_task == master_task) then do j = 1, ny_global-1 @@ -1933,26 +1932,26 @@ subroutine primary_grid_lengths_HTE(work_g) ! assume cyclic; noncyclic will be handled during scatter im1 = i-1 if (i == 1) im1 = nx_global - work_g2(i,j) = p25*(work_g(i,j) + work_g(im1,j) + work_g(i,j+1) + work_g(im1,j+1)) ! dyn + work_g2(i,j) = p25*(work_g(i,j) + work_g(im1,j) + work_g(i,j+1) + work_g(im1,j+1)) ! dyN enddo enddo - ! extrapolate to obtain dyn along j=ny_global + ! extrapolate to obtain dyN along j=ny_global if (ny_global > 1) then do i = 1, nx_global ! assume cyclic; noncyclic will be handled during scatter im1 = i-1 if (i == 1) im1 = nx_global work_g2(i,ny_global) = p5*(c2*work_g(i ,ny_global-1) - work_g(i ,ny_global-2) + & - c2*work_g(im1,ny_global-1) - work_g(im1,ny_global-2)) ! dyn + c2*work_g(im1,ny_global-1) - work_g(im1,ny_global-2)) ! dyN enddo endif endif - call scatter_global(dyn, work_g2, master_task, distrb_info, & + call scatter_global(dyN, work_g2, master_task, distrb_info, & field_loc_center, field_type_scalar) - ! dye = HTE + ! dyE = HTE - dye(:,:,:) = HTE(:,:,:) + dyE(:,:,:) = HTE(:,:,:) deallocate(work_g2) diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 index 5587f2b6b..97bb72dab 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -56,7 +56,7 @@ subroutine ice_write_hist (ns) use ice_gather_scatter, only: gather_global use ice_grid, only: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT, & hm, uvm, npm, epm, bm, tarea, uarea, narea, earea, & - dxu, dxt, dyu, dyt, dxn, dyn, dxe, dye, HTN, HTE, ANGLE, ANGLET, & + dxU, dxT, dyU, dyT, dxN, dyN, dxE, dyE, HTN, HTE, ANGLE, ANGLET, & lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared @@ -846,21 +846,21 @@ subroutine ice_write_hist (ns) CASE ('blkmask') call gather_global(work_g1, bm, master_task, distrb_info) CASE ('dxu') - call gather_global(work_g1, dxu, master_task, distrb_info) + call gather_global(work_g1, dxU, master_task, distrb_info) CASE ('dyu') - call gather_global(work_g1, dyu, master_task, distrb_info) + call gather_global(work_g1, dyU, master_task, distrb_info) CASE ('dxt') - call gather_global(work_g1, dxt, master_task, distrb_info) + call gather_global(work_g1, dxT, master_task, distrb_info) CASE ('dyt') - call gather_global(work_g1, dyt, master_task, distrb_info) + call gather_global(work_g1, dyT, master_task, distrb_info) CASE ('dxn') - call gather_global(work_g1, dxn, master_task, distrb_info) + call gather_global(work_g1, dxN, master_task, distrb_info) CASE ('dyn') - call gather_global(work_g1, dyn, master_task, distrb_info) + call gather_global(work_g1, dyN, master_task, distrb_info) CASE ('dxe') - call gather_global(work_g1, dxe, master_task, distrb_info) + call gather_global(work_g1, dxE, master_task, distrb_info) CASE ('dye') - call gather_global(work_g1, dye, master_task, distrb_info) + call gather_global(work_g1, dyE, master_task, distrb_info) CASE ('HTN') call gather_global(work_g1, HTN, master_task, distrb_info) CASE ('HTE') diff --git 
a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 index a6660544e..92f7663a2 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 @@ -51,7 +51,7 @@ subroutine ice_write_hist (ns) use ice_gather_scatter, only: gather_global use ice_grid, only: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT, & hm, bm, uvm, npm, epm, & - dxu, dxt, dyu, dyt, dxn, dyn, dxe, dye, HTN, HTE, ANGLE, ANGLET, & + dxU, dxT, dyU, dyT, dxN, dyN, dxE, dyE, HTN, HTE, ANGLE, ANGLET, & tarea, uarea, narea, earea, tmask, umask, nmask, emask, & lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & lonn_bounds, latn_bounds, lone_bounds, late_bounds @@ -779,21 +779,21 @@ subroutine ice_write_hist (ns) CASE ('earea') workd2 = earea(:,:,1:nblocks) CASE ('dxt') - workd2 = dxt(:,:,1:nblocks) + workd2 = dxT(:,:,1:nblocks) CASE ('dyt') - workd2 = dyt(:,:,1:nblocks) + workd2 = dyT(:,:,1:nblocks) CASE ('dxu') - workd2 = dxu(:,:,1:nblocks) + workd2 = dxU(:,:,1:nblocks) CASE ('dyu') - workd2 = dyu(:,:,1:nblocks) + workd2 = dyU(:,:,1:nblocks) CASE ('dxn') - workd2 = dxn(:,:,1:nblocks) + workd2 = dxN(:,:,1:nblocks) CASE ('dyn') - workd2 = dyn(:,:,1:nblocks) + workd2 = dyN(:,:,1:nblocks) CASE ('dxe') - workd2 = dxe(:,:,1:nblocks) + workd2 = dxE(:,:,1:nblocks) CASE ('dye') - workd2 = dye(:,:,1:nblocks) + workd2 = dyE(:,:,1:nblocks) CASE ('HTN') workd2 = HTN(:,:,1:nblocks) CASE ('HTE') diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 index e7fb5f632..56287feb1 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 @@ -437,7 +437,7 @@ subroutine ice_mesh_init_tlon_tlat_area_hm() use ice_grid , only : tlon, tlat, hm, tarea, ULON, ULAT, HTN, HTE, ANGLE, ANGLET use ice_grid , only : uarea, uarear, tarear!, tinyarea - use ice_grid , only : dxt, dyt, dxu, dyu, dyhx, dxhy, cyp, cxp, cym, cxm + use ice_grid , only : dxT, dyT, dxU, dyU, dyhx, dxhy, cyp, cxp, cym, cxm use ice_grid , only : makemask use ice_boundary , only : ice_HaloUpdate use ice_domain , only : blocks_ice, nblocks, halo_info, distrb_info @@ -531,10 +531,10 @@ subroutine ice_mesh_init_tlon_tlat_area_hm() HTN (i,j,iblk) = 1.e36_dbl_kind HTE (i,j,iblk) = 1.e36_dbl_kind - dxt (i,j,iblk) = 1.e36_dbl_kind - dyt (i,j,iblk) = 1.e36_dbl_kind - dxu (i,j,iblk) = 1.e36_dbl_kind - dyu (i,j,iblk) = 1.e36_dbl_kind + dxT (i,j,iblk) = 1.e36_dbl_kind + dyT (i,j,iblk) = 1.e36_dbl_kind + dxU (i,j,iblk) = 1.e36_dbl_kind + dyU (i,j,iblk) = 1.e36_dbl_kind dxhy (i,j,iblk) = 1.e36_dbl_kind dyhx (i,j,iblk) = 1.e36_dbl_kind cyp (i,j,iblk) = 1.e36_dbl_kind diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index 2faa623ec..202207c38 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -18,7 +18,7 @@ module cice_cap use ice_calendar, only: dt use ice_flux use ice_grid, only: TLAT, TLON, ULAT, ULON, hm, tarea, ANGLET, ANGLE, & - dxt, dyt, grid_average_X2Y + dxT, dyT, grid_average_X2Y use ice_state use CICE_RunMod use CICE_InitMod diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 25130b131..27a333d86 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -153,7 +153,7 @@ elasticDamp = 0.36d0 deltaminEVP = 1e-11 deltaminVP = 2e-9 - capping = 1. 
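!     The string option below replaces the numeric flag above; condensed
!     from the ice_init.F90 change earlier in this patch, the mapping onto
!     the internal capping parameter is (formulas schematic only; the
!     exact viscosity expressions live with the dynamics code):
!
!        if (capping_method == 'max') then      ! Hibler 1979:    zeta ~ P/max(Delta,deltamin)
!           capping = c1
!        elseif (capping_method == 'sum') then  ! Kreyscher 2000: zeta ~ P/(Delta+deltamin)
!           capping = c0
!        endif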
+ capping_method = 'max' seabed_stress = .false. seabed_stress_method = 'LKD' k1 = 7.5 @@ -271,7 +271,7 @@ fyear_init = 2005 ycycle = 1 atm_data_format = 'bin' - atm_data_dir = '/glade/u/home/tcraig/cice_data/' + atm_data_dir = 'unknown_atm_data_dir' bgc_data_dir = 'unknown_bgc_data_dir' ocn_data_format = 'bin' ocn_data_dir = '/unknown_ocn_data_dir' diff --git a/configuration/scripts/options/set_nml.alt07 b/configuration/scripts/options/set_nml.alt07 new file mode 100644 index 000000000..3355b6019 --- /dev/null +++ b/configuration/scripts/options/set_nml.alt07 @@ -0,0 +1,6 @@ +kdyn = 1 +evp_algorithm = 'standard_2d' +ndte = 300 +capping_method = 'sum' +visc_method = 'avg_zeta' + diff --git a/configuration/scripts/options/set_nml.box2001 b/configuration/scripts/options/set_nml.box2001 index adce08a74..2cd5923ac 100644 --- a/configuration/scripts/options/set_nml.box2001 +++ b/configuration/scripts/options/set_nml.box2001 @@ -27,7 +27,7 @@ atmbndy = 'constant' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' -ice_data_conc = 'p5' +ice_data_conc = 'box2001' ice_data_dist = 'box2001' calc_strair = .false. restore_ice = .true. diff --git a/configuration/scripts/options/set_nml.boxadv b/configuration/scripts/options/set_nml.boxadv index 815746102..716884031 100644 --- a/configuration/scripts/options/set_nml.boxadv +++ b/configuration/scripts/options/set_nml.boxadv @@ -8,7 +8,7 @@ ns_boundary_type = 'cyclic' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' -ice_data_conc = 'p5' +ice_data_conc = 'box2001' ice_data_dist = 'box2001' tr_iage = .true. tr_FY = .false. diff --git a/configuration/scripts/options/set_nml.boxnodyn b/configuration/scripts/options/set_nml.boxnodyn index deb53cf5a..0b9a214f1 100644 --- a/configuration/scripts/options/set_nml.boxnodyn +++ b/configuration/scripts/options/set_nml.boxnodyn @@ -53,7 +53,7 @@ seabed_stress = .true. atm_data_type = 'calm' ocn_data_type = 'calm' ice_data_type = 'box2001' -ice_data_conc = 'p5' +ice_data_conc = 'box2001' ice_data_dist = 'box2001' shortwave = 'ccsm3' albedo_type = 'constant' diff --git a/configuration/scripts/options/set_nml.boxrestore b/configuration/scripts/options/set_nml.boxrestore index b2078a566..fd6a9e59e 100644 --- a/configuration/scripts/options/set_nml.boxrestore +++ b/configuration/scripts/options/set_nml.boxrestore @@ -10,7 +10,7 @@ ns_boundary_type = 'open' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' -ice_data_conc = 'p5' +ice_data_conc = 'box2001' ice_data_dist = 'box2001' histfreq = 'd','x','x','x','x' histfreq_n = 1,1,1,1,1 diff --git a/configuration/scripts/options/set_nml.boxwall b/configuration/scripts/options/set_nml.boxwall deleted file mode 100644 index 5a99e311b..000000000 --- a/configuration/scripts/options/set_nml.boxwall +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt = 240 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'wall' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .false. -ew_boundary_type = 'cyclic' -ns_boundary_type = 'cyclic' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' -atmbndy = 'constant' -atm_data_type = 'uniform_east' -ocn_data_type = 'calm' -ice_data_type = 'easthalf' -ice_data_conc = 'c1' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. 
-restore_ice = .false. -f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' -f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.boxwallp5 b/configuration/scripts/options/set_nml.boxwallp5 deleted file mode 100644 index 229dba456..000000000 --- a/configuration/scripts/options/set_nml.boxwallp5 +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt = 240 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'wall' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .false. -ew_boundary_type = 'cyclic' -ns_boundary_type = 'cyclic' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' -atmbndy = 'constant' -atm_data_type = 'uniform_east' -ocn_data_type = 'calm' -ice_data_type = 'easthalf' -ice_data_conc = 'p5' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. -f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' -f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.dynanderson b/configuration/scripts/options/set_nml.dynanderson index 2e8e13659..2306ac918 100644 --- a/configuration/scripts/options/set_nml.dynanderson +++ b/configuration/scripts/options/set_nml.dynanderson @@ -1,5 +1,5 @@ kdyn = 3 algo_nonlin = 'anderson' use_mean_vrel = .false. -capping = 1. +capping_method = 'max' diff --git a/configuration/scripts/options/set_nml.dynpicard b/configuration/scripts/options/set_nml.dynpicard index 05efb3526..6f32c5153 100644 --- a/configuration/scripts/options/set_nml.dynpicard +++ b/configuration/scripts/options/set_nml.dynpicard @@ -1,4 +1,4 @@ kdyn = 3 algo_nonlin = 'picard' use_mean_vrel = .true. -capping = 1. 
+capping_method = 'max' diff --git a/configuration/scripts/options/set_nml.gbox12 b/configuration/scripts/options/set_nml.gbox12 index 6ad5b567b..96701522f 100644 --- a/configuration/scripts/options/set_nml.gbox12 +++ b/configuration/scripts/options/set_nml.gbox12 @@ -4,5 +4,5 @@ kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' -ice_data_conc = 'p5' +ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox128 b/configuration/scripts/options/set_nml.gbox128 index f82267c64..275c02607 100644 --- a/configuration/scripts/options/set_nml.gbox128 +++ b/configuration/scripts/options/set_nml.gbox128 @@ -5,6 +5,6 @@ kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' -ice_data_conc = 'p5' +ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox180 b/configuration/scripts/options/set_nml.gbox180 index 6ad5b567b..96701522f 100644 --- a/configuration/scripts/options/set_nml.gbox180 +++ b/configuration/scripts/options/set_nml.gbox180 @@ -4,5 +4,5 @@ kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' -ice_data_conc = 'p5' +ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox80 b/configuration/scripts/options/set_nml.gbox80 index 6ad5b567b..96701522f 100644 --- a/configuration/scripts/options/set_nml.gbox80 +++ b/configuration/scripts/options/set_nml.gbox80 @@ -4,5 +4,5 @@ kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' -ice_data_conc = 'p5' +ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index 858961eac..d4bbe8031 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -21,6 +21,7 @@ restart gx3 12x2 alt03,maskhalo,droundrobin restart gx3 4x4 alt04 restart gx3 4x4 alt05 restart gx3 8x2 alt06 +restart gx3 8x3 alt07 restart gx3 18x2 debug,maskhalo restart gx3 6x2 alt01,debug,short restart gx3 8x2 alt02,debug,short @@ -29,6 +30,7 @@ smoke gx3 12x2 alt03,debug,short,maskhalo,droundrobin smoke gx3 4x4 alt04,debug,short smoke gx3 4x4 alt05,debug,short smoke gx3 8x2 alt06,debug,short +smoke gx3 8x3 alt07,debug,short smoke gx3 10x2 debug,diag1,run5day,gx3sep2 smoke gx3 7x2x5x29x12 diag1,bigdiag,run1day,debug restart gbox128 4x2 short diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 6909c1ac9..10d8ae36c 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -5,8 +5,6 @@ restart gx3 4x2 debug,diag1 restart2 gx1 16x2 debug,diag1 smoke gbox12 1x1x12x12x1 boxchan smoke gbox80 1x1 box2001 -smoke gbox80 2x2 boxwallp5 -smoke gbox80 3x3 boxwall smoke gbox80 2x2 boxwallblock smoke gbox80 1x1 boxslotcyl smoke gbox80 2x4 boxnodyn @@ -34,8 +32,6 @@ restart gx3 4x2 debug,diag1,gridcd restart2 gx1 16x2 debug,diag1,gridcd smoke gbox12 1x1x12x12x1 boxchan,gridcd smoke gbox80 1x1 box2001,gridcd -smoke gbox80 2x2 boxwallp5,gridcd -smoke gbox80 3x3 boxwall,gridcd smoke gbox80 2x2 boxwallblock,gridcd smoke gbox80 1x1 boxslotcyl,gridcd smoke gbox80 2x4 boxnodyn,gridcd @@ -63,8 +59,6 @@ restart gx3 4x2 debug,diag1,gridc restart2 gx1 16x2 debug,diag1,gridc smoke gbox12 1x1x12x12x1 boxchan,gridc smoke gbox80 1x1 box2001,gridc 
-smoke gbox80 2x2 boxwallp5,gridc -smoke gbox80 3x3 boxwall,gridc smoke gbox80 2x2 boxwallblock,gridc smoke gbox80 1x1 boxslotcyl,gridc smoke gbox80 2x4 boxnodyn,gridc diff --git a/configuration/scripts/tests/io_suite.ts b/configuration/scripts/tests/io_suite.ts index 4d5129578..01ea21ec2 100644 --- a/configuration/scripts/tests/io_suite.ts +++ b/configuration/scripts/tests/io_suite.ts @@ -8,6 +8,7 @@ restart gx3 16x2 gx3ncarbulk,alt02,histall,iobinary,precision8 restart gx3 8x4 gx3ncarbulk,alt04,histall,iobinary,precision8 restart gx3 4x4 gx3ncarbulk,alt05,histall,iobinary restart gx3 14x2 gx3ncarbulk,alt06,histall,iobinary,precision8 +restart gx3 14x2 gx3ncarbulk,alt07,histall,iobinary,precision8 restart gx3 32x1 gx3ncarbulk,bgcz,histall,iobinary,precision8 restart gx3 16x2 gx3ncarbulk,bgcskl,histall,iobinary restart gx3 14x2 gx3ncarbulk,isotope,histall,iobinary,precision8 @@ -21,6 +22,7 @@ restart gx3 24x1 alt03,histall,ionetcdf,precision8 restart gx3 8x4 alt04,histall,ionetcdf,cdf64 restart gx3 8x4 alt05,histall,ionetcdf,precision8,cdf64 restart gx3 16x2 alt06,histall,ionetcdf +restart gx3 16x2 alt07,histall,ionetcdf restart gx3 30x1 bgcz,histall,ionetcdf restart gx3 15x2 bgcskl,histall,ionetcdf,precision8 restart gx3 31x1 isotope,histall,ionetcdf,cdf64 @@ -34,6 +36,7 @@ restart gx3 24x1 alt03,histall,iopio1 restart gx3 8x4 alt04,histall,iopio1,precision8,cdf64 restart gx3 8x4 alt05,histall,iopio1,cdf64 restart gx3 32x1 alt06,histall,iopio1,precision8 +restart gx3 32x1 alt07,histall,iopio1,precision8 restart gx3 16x2 bgcz,histall,iopio1,precision8 restart gx3 30x1 bgcskl,histall,iopio1 restart gx3 8x4 isotope,histall,iopio1,precision8,cdf64 @@ -47,6 +50,7 @@ restart gx3 24x1 alt03,histall,iopio2,precision8 restart gx3 8x4 alt04,histall,iopio2 restart gx3 8x4 alt05,histall,iopio2,precision8,cdf64 restart gx3 16x2 alt06,histall,iopio2,cdf64 +restart gx3 16x2 alt07,histall,iopio2,cdf64 restart gx3 16x2 bgcz,histall,iopio2,cdf64 restart gx3 30x1 bgcskl,histall,iopio2,precision8 restart gx3 8x4 isotope,histall,iopio2 @@ -60,6 +64,7 @@ restart gx3 24x1 alt03,histall,iopio1p,cdf64 restart gx3 8x4 alt04,histall,iopio1p,precision8 restart gx3 8x4 alt05,histall,iopio1p restart gx3 6x4 alt06,histall,iopio1p,precision8,cdf64 +restart gx3 6x4 alt07,histall,iopio1p,precision8,cdf64 restart gx3 16x2 bgcz,histall,iopio1p,precision8,cdf64 restart gx3 30x1 bgcskl,histall,iopio1p,cdf64 restart gx3 8x4 isotope,histall,iopio1p,precision8 @@ -73,6 +78,7 @@ restart gx3 24x1 alt03,histall,iopio2p,precision8,cdf64 restart gx3 8x4 alt04,histall,iopio2p,cdf64 restart gx3 8x4 alt05,histall,iopio2p,precision8 restart gx3 24x1 alt06,histall,iopio2p +restart gx3 24x1 alt07,histall,iopio2p restart gx3 16x2 bgcz,histall,iopio2p restart gx3 30x1 bgcskl,histall,iopio2p,precision8,cdf64 restart gx3 8x4 isotope,histall,iopio2p,cdf64 diff --git a/configuration/scripts/tests/nothread_suite.ts b/configuration/scripts/tests/nothread_suite.ts index 12fd03662..93839b000 100644 --- a/configuration/scripts/tests/nothread_suite.ts +++ b/configuration/scripts/tests/nothread_suite.ts @@ -22,12 +22,14 @@ restart gx3 8x1 alt03 restart gx3 16x1x5x29x6 alt04 restart gx3 16x1 alt05 restart gx3 20x1 alt06 +restart gx3 18x1 alt07 restart gx3 18x1 alt01,debug,short restart gx3 20x1 alt02,debug,short restart gx3 24x1 alt03,debug,short smoke gx3 24x1 alt04,debug,short smoke gx3 32x1 alt05,debug,short smoke gx3 16x1 alt06,debug,short +smoke gx3 20x1 alt07,debug,short restart gx3 16x1 isotope smoke gx3 6x1 isotope,debug smoke gx3 8x1 
fsd1,diag24,run5day,debug diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index ea8680170..937a3ec90 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -7,6 +7,7 @@ smoke gx3 12x2 alt03,droundrobin,reprosum,run10day smoke gx3 4x4 alt04,reprosum,run10day smoke gx3 4x4 alt05,reprosum,run10day smoke gx3 8x2 alt06,reprosum,run10day +smoke gx3 7x2 alt07,reprosum,run10day smoke gx3 8x2 bgcz,reprosum,run10day smoke gx1 15x2 reprosum,run10day smoke gx1 15x2 seabedprob,reprosum,run10day @@ -32,6 +33,7 @@ smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt04_reprosum_run10day smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day +smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread smoke_gx3_7x2_alt07_reprosum_run10day smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob @@ -59,6 +61,7 @@ smoke gx3 8x2 alt02,reprosum,run10day,gridc smoke gx3 4x4 alt04,reprosum,run10day,gridc smoke gx3 4x4 alt05,reprosum,run10day,gridc smoke gx3 8x2 alt06,reprosum,run10day,gridc +smoke gx3 7x2 alt07,reprosum,run10day,gridc smoke gx3 8x2 bgcz,reprosum,run10day,gridc smoke gx1 15x2 reprosum,run10day,gridc smoke gx1 15x2 seabedprob,reprosum,run10day,gridc @@ -84,6 +87,7 @@ smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt04_gridc_reprosum_run10day smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day +smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_7x2_alt07_gridc_reprosum_run10day smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgcz_gridc_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob @@ -111,6 +115,7 @@ smoke gx3 8x2 alt02,reprosum,run10day,gridcd smoke gx3 4x4 alt04,reprosum,run10day,gridcd smoke gx3 4x4 alt05,reprosum,run10day,gridcd smoke gx3 8x2 alt06,reprosum,run10day,gridcd +smoke gx3 7x2 alt07,reprosum,run10day,gridcd smoke gx3 8x2 bgcz,reprosum,run10day,gridcd smoke gx1 15x2 reprosum,run10day,gridcd smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd @@ -136,6 +141,7 @@ smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt04_gridcd_reprosum_run10day smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt05_gridcd_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day +smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_7x2_alt07_gridcd_reprosum_run10day smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgcz_gridcd_reprosum_run10day smoke gx1 18x1 
reprosum,run10day,cmplogrest,thread,gridcd  smoke_gx1_15x2_gridcd_reprosum_run10day
smoke        gx1        18x1        seabedprob,reprosum,run10day,cmplogrest,thread,gridcd  smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob

diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst
index c17938d59..df8e4616b 100644
--- a/doc/source/cice_index.rst
+++ b/doc/source/cice_index.rst
@@ -95,7 +95,8 @@ either Celsius or Kelvin units).
    "calc_dragio", "if true, calculate ``dragio`` from ``iceruf_ocn`` and ``thickness_ocn_layer1``", "F"
    "calc_strair", "if true, calculate wind stress", "T"
    "calc_Tsfc", "if true, calculate surface temperature", "T"
-   "capping", "parameter for capping method of viscosities", "1.0"
+   "capping", "parameter associated with capping method of viscosities", "1.0"
+   "capping_method", "namelist to specify capping method", "max"
    "Cdn_atm", "atmospheric drag coefficient", ""
    "Cdn_ocn", "ocean drag coefficient", ""
    "Cf", "ratio of ridging work to PE change in ridging", "17."
@@ -188,17 +189,16 @@ either Celsius or Kelvin units).
    "dumpfreq_n", "restart output frequency", ""
    "dump_last", "if true, write restart on last time step of simulation", ""
    "dwavefreq", "widths of wave frequency bins", "1/s"
-   "dxe", "width of E cell (:math:`\Delta x`) through the middle", "m"
+   "dxE", "width of E cell (:math:`\Delta x`) through the middle", "m"
+   "dxN", "width of N cell (:math:`\Delta x`) through the middle", "m"
+   "dxT", "width of T cell (:math:`\Delta x`) through the middle", "m"
+   "dxU", "width of U cell (:math:`\Delta x`) through the middle", "m"
    "dxhy", "combination of HTE values", ""
-   "dxn", "width of N cell (:math:`\Delta x`) through the middle", "m"
-   "dxt", "width of T cell (:math:`\Delta x`) through the middle", "m"
-   "dxu", "width of U cell (:math:`\Delta x`) through the middle", "m"
-   "dye", "height of E cell (:math:`\Delta y`) through the middle", "m"
+   "dyE", "height of E cell (:math:`\Delta y`) through the middle", "m"
+   "dyN", "height of N cell (:math:`\Delta y`) through the middle", "m"
+   "dyT", "height of T cell (:math:`\Delta y`) through the middle", "m"
+   "dyU", "height of U cell (:math:`\Delta y`) through the middle", "m"
    "dyhx", "combination of HTN values", ""
-   "dyn", "height of N cell (:math:`\Delta y`) through the middle", "m"
-   "dyn_dt", "dynamics and transport time step (:math:`\Delta t_{dyn}`)", "s"
-   "dyt", "height of T cell (:math:`\Delta y`) through the middle", "m"
-   "dyu", "height of U cell (:math:`\Delta y`) through the middle", "m"
    "dvidtd", "ice volume tendency due to dynamics/transport", "m/s"
    "dvidtt", "ice volume tendency due to thermodynamics", "m/s"
    "dvirdg(n)dt", "ice volume ridging rate (category n)", "m/s"

diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst
index 287de001e..241fa05fe 100644
--- a/doc/source/science_guide/sg_dynamics.rst
+++ b/doc/source/science_guide/sg_dynamics.rst
@@ -505,7 +505,7 @@ where

       \Delta = \left[D_D^2 + {e_f^2\over e_g^4}\left(D_T^2 + D_S^2\right)\right]^{1/2}.
    :label: Delta

-When the deformation :math:`\Delta` tends toward zero, the viscosities tend toward infinity. To avoid this issue, :math:`\Delta` needs to be limited and is replaced by :math:`\Delta^*` in equation :eq:`zeta`. Two methods for limiting :math:`\Delta` (or for capping the viscosities) are available in the code.
If the namelist parameter ``capping`` is set to 1., :math:`\Delta^*=max(\Delta, \Delta_{min})` :cite:`Hibler79` while with ``capping`` set to 0., the smoother formulation :math:`\Delta^*=(\Delta + \Delta_{min})` of :cite:`Kreyscher00` is used.
+When the deformation :math:`\Delta` tends toward zero, the viscosities tend toward infinity. To avoid this issue, :math:`\Delta` needs to be limited and is replaced by :math:`\Delta^*` in equation :eq:`zeta`. Two methods for limiting :math:`\Delta` (or for capping the viscosities) are available in the code. If the namelist parameter ``capping_method`` is set to ``max``, :math:`\Delta^*=max(\Delta, \Delta_{min})` :cite:`Hibler79` while with ``capping_method`` set to ``sum``, the smoother formulation :math:`\Delta^*=(\Delta + \Delta_{min})` of :cite:`Kreyscher00` is used.

 The ice strength :math:`P` is a function of the ice thickness
 distribution as described in the `Icepack Documentation `_.

diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst
index 5bf0ab6dc..6b10a2165 100644
--- a/doc/source/user_guide/ug_case_settings.rst
+++ b/doc/source/user_guide/ug_case_settings.rst
@@ -366,6 +366,7 @@ tracer_nml
    "``restart_pond_cesm``", "logical", "restart tracer values from file", "``.false.``"
    "``restart_pond_lvl``", "logical", "restart tracer values from file", "``.false.``"
    "``restart_pond_topo``", "logical", "restart tracer values from file", "``.false.``"
+   "``restart_snow``", "logical", "restart snow tracer values from file", "``.false.``"
    "", "", "", ""

 thermo_nml
@@ -400,6 +401,16 @@ thermo_nml
 dynamics_nml
 ~~~~~~~~~~~~~~~~~~~~~~~~~

+..
+   commented out
+   "``damping_andacc``", "integer", "damping factor for Anderson acceleration", "0"
+   "``dim_andacc``", "integer", "size of Anderson minimization matrix", "5"
+   "``fpfunc_andacc``", "``1``", "fixed point function for Anderson acceleration, FGMRES(A(x),b(x))", "1"
+   "", "``2``", "fixed point function for Anderson acceleration, x-A(x)x+b(x)", ""
+   "``reltol_andacc``", "real", "relative tolerance for Anderson acceleration", "1e-6"
+   "``start_andacc``", "integer", "acceleration delay factor for Anderson acceleration", "0"
+   commented out
+
 ..
csv-table:: **dynamics_nml namelist options**
   :header: "variable", "options/format", "description", "default value"
   :widths: 15, 15, 30, 15
@@ -412,9 +423,8 @@ dynamics_nml
    "``alphab``", "real", ":math:`\alpha_{b}` factor in :cite:`Lemieux16`", "20.0"
    "``arlx``", "real", "revised_evp value", "300.0"
    "``brlx``", "real", "revised_evp value", "300.0"
-   "``capping``", "real", "method for capping the viscosities", "1.0"
-   "", "``0``", "Kreyscher 2000", ""
-   "", "``1``", "Hibler 1979", ""
+   "``capping_method``", "``max``", "max capping in :cite:`Hibler79`", "max"
+   "", "``sum``", "sum capping in :cite:`Kreyscher00`", ""
    "``Cf``", "real", "ratio of ridging work to PE change in ridging", "17.0"
    "``coriolis``", "``constant``", "constant coriolis value = 1.46e-4 s\ :math:`^{-1}`", "``latitude``"
    "", "``latitude``", "coriolis variable by latitude", ""
@@ -447,11 +457,11 @@ dynamics_nml
    "``Ktens``", "real", "Tensile strength factor (see :cite:`Konig10`)", "0.0"
    "``k1``", "real", "1st free parameter for landfast parameterization", "7.5"
    "``k2``", "real", "2nd free parameter (N/m\ :math:`^3`) for landfast parameterization", "15.0"
-   "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "1000"
    "``maxits_fgmres``", "integer", "maximum number of restarts for FGMRES solver", "1"
+   "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "1000"
    "``maxits_pgmres``", "integer", "maximum number of restarts for PGMRES preconditioner", "1"
-   "``monitor_nonlin``", "logical", "write velocity norm at each nonlinear iteration", "``.false.``"
    "``monitor_fgmres``", "logical", "write velocity norm at each FGMRES iteration", "``.false.``"
+   "``monitor_nonlin``", "logical", "write velocity norm at each nonlinear iteration", "``.false.``"
    "``monitor_pgmres``", "logical", "write velocity norm at each PGMRES iteration", "``.false.``"
    "``mu_rdg``", "real", "e-folding scale of ridged ice for ``krdg_partic`` = 1 in m^0.5", "3.0"
    "``ndte``", "integer", "number of EVP subcycles", "120"
@@ -461,8 +471,8 @@ dynamics_nml
    "", "``ident``", "Don't use a preconditioner for the FGMRES solver", ""
    "", "``pgmres``", "Use GMRES as preconditioner for FGMRES solver", ""
    "``Pstar``", "real", "constant in Hibler strength formula (N/m\ :math:`^2`)", "2.75e4"
-   "``reltol_nonlin``", "real", "relative tolerance for nonlinear solver", "1e-8"
    "``reltol_fgmres``", "real", "relative tolerance for FGMRES solver", "1e-2"
+   "``reltol_nonlin``", "real", "relative tolerance for nonlinear solver", "1e-8"
    "``reltol_pgmres``", "real", "relative tolerance for PGMRES preconditioner", "1e-6"
    "``revised_evp``", "logical", "use revised EVP formulation", "``.false.``"
    "``seabed_stress``", "logical", "use seabed stress parameterization for landfast ice", "``.false.``"
@@ -561,6 +571,12 @@ snow_nml
 forcing_nml
 ~~~~~~~~~~~~~~~~~~~~~~~~~

+..
+   commented out
+   "``calc_dragio``", "logical", "compute dragio from ``iceruf_ocn`` and thickness of first ocean level, not supported", "``.false.``"
+   "``iceruf_ocn``", "real", "under ice roughness in meters, not supported", "0.03"
+   commented out
+
 ..
csv-table:: **forcing_nml namelist options**
   :header: "variable", "options/format", "description", "default value"
   :widths: 15, 15, 30, 15
@@ -601,7 +617,8 @@ forcing_nml
    "``formdrag``", "logical", "calculate form drag", "``.false.``"
    "``fyear_init``", "integer", "first year of atmospheric forcing data", "1900"
    "``highfreq``", "logical", "high-frequency atmo coupling", "``.false.``"
-   "``ice_data_conc``", "``c1``", "initial ice concentation of 1.0", "``default``"
+   "``ice_data_conc``", "``box2001``", "ice distribution ramped from 0 to 1 west to east consistent with :ref:`box2001` test (:cite:`Hunke01`)", "``default``"
+   "", "``c1``", "initial ice concentration of 1.0", ""
    "", "``default``", "same as parabolic", ""
    "", "``p5``", "initial concentration of 0.5", ""
    "", "``p8``", "initial concentration of 0.8", ""
@@ -611,18 +628,15 @@ forcing_nml
    "", "``default``", "uniform distribution, equivalent to uniform", ""
    "", "``gauss``", "gauss distribution of ice with a peak in the center of the domain", ""
    "", "``uniform``", "uniform distribution, equivalent to default", ""
-   "``ice_data_type``", "``bigblock``", "ice mask covering about 90 percent of the area in center of domain", "``default``"
-   "", "``block``", "ice block covering about 25 percent of the area in center of domain", ""
+   "``ice_data_type``", "``block``", "ice block covering about 25 percent of the area in center of domain", "``default``"
    "", "``boxslotcyl``", "slot cylinder ice mask associated with :ref:`boxslotcyl` test (:cite:`Zalesak79`)", ""
    "", "``box2001``", "box2001 ice mask associated with :ref:`box2001` test (:cite:`Hunke01`)", ""
    "", "``channel``", "ice defined on entire grid in i-direction and 50% in j-direction in center of domain", ""
    "", "``default``", "same as latsst", ""
    "", "``eastblock``", "ice block covering about 25 percent of domain at the east edge of the domain", ""
-   "", "``easthalf``", "ice defined on east half of the domain",""
    "", "``latsst``", "ice dependent on latitude and ocean temperature", ""
-   "", "``smallblock``", "ice defined on 2x2 gridcells in center of domain", ""
    "", "``uniform``", "ice defined at all grid points", ""
-   "``iceruf``", "real", "ice surface roughness at atmosphere interface", "0.0005"
+   "``iceruf``", "real", "ice surface roughness at atmosphere interface in meters", "0.0005"
    "``l_mpond_fresh``", "``.false.``", "release pond water immediately to ocean", "``.false.``"
    "", "``true``", "retain (topo) pond water until ponds drain", ""
    "``natmiter``", "integer", "number of atmo boundary layer iterations", "5"
@@ -648,7 +662,7 @@ forcing_nml
    "", "``minus1p8``", "constant ocean freezing temperature (:math:`-1.8^{\circ} C`)", ""
    "", "``mushy``", "matches mushy-layer thermo (ktherm=2)", ""
    "``trestore``", "integer", "sst restoring time scale (days)", "90"
-   "``ustar_min``", "real", "minimum value of ocean friction velocity", "0.0005 m/s"
+   "``ustar_min``", "real", "minimum value of ocean friction velocity in m/s", "0.0005"
    "``update_ocn_f``", "``.false.``", "do not include frazil water/salt fluxes in ocn fluxes", "``.false.``"
    "", "``true``", "include frazil water/salt fluxes in ocn fluxes", ""
    "``wave_spec_file``", "string", "data file containing wave spectrum forcing data", ""
@@ -822,6 +836,17 @@ zbgc_nml
 icefields_nml
 ~~~~~~~~~~~~~~~~~~~~~~~~~

+There are several icefield namelist groups to control model history output. See the
+source code for a full list of supported output fields.
+
+* ``icefields_nml`` is in **cicecore/cicedynB/analysis/ice_history_shared.F90**
+* ``icefields_bgc_nml`` is in **cicecore/cicedynB/analysis/ice_history_bgc.F90**
+* ``icefields_drag_nml`` is in **cicecore/cicedynB/analysis/ice_history_drag.F90**
+* ``icefields_fsd_nml`` is in **cicecore/cicedynB/analysis/ice_history_fsd.F90**
+* ``icefields_mechred_nml`` is in **cicecore/cicedynB/analysis/ice_history_mechred.F90**
+* ``icefields_pond_nml`` is in **cicecore/cicedynB/analysis/ice_history_pond.F90**
+* ``icefields_snow_nml`` is in **cicecore/cicedynB/analysis/ice_history_snow.F90**
+
 .. csv-table:: **icefields_nml namelist options**
   :header: "variable", "options/format", "description", "default value"
   :widths: 15, 15, 30, 15
@@ -843,3 +868,4 @@ icefields_nml
    "", "``md``", "*e.g.,* write both monthly and daily files", ""
    "", "", "", ""

+

diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst
index 36799d68e..6f1a94a36 100644
--- a/doc/source/user_guide/ug_implementation.rst
+++ b/doc/source/user_guide/ug_implementation.rst
@@ -79,7 +79,7 @@ this tool.
 Grid, boundary conditions and masks
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-The spatial discretization of the original implementation was specialized
+The spatial discretization of the original implementation is specialized
 for a generalized orthogonal B-grid as in :cite:`Murray96` or
 :cite:`Smith95`. Figure :ref:`fig-Bgrid` is a schematic of CICE B-grid.
 This cell with the tracer point :math:`t(i,j)` in the middle
@@ -90,7 +90,7 @@ corner. The other corners of the T-cell are northwest (NW), southwest
 (SW) and southeast (SE). The lengths of the four edges of the T-cell
 are respectively HTN, HTW, HTS and HTE for the northern, western,
 southern and eastern edges. The lengths of the T-cell through the
-middle are respectively dxt and dyt along the x and y axis.
+middle are respectively dxT and dyT along the x and y axis.

 We also occasionally refer to “U-cells,” which are centered on the
 northeast corner of the corresponding T-cells and have velocity in the
@@ -155,9 +155,9 @@ the primary prognostic grid variable names on the different grids.
    +----------------+-------+-------+-------+-------+
    | latitude       | TLAT  | ULAT  | NLAT  | ELAT  |
    +----------------+-------+-------+-------+-------+
-   | dx             | dxt   | dxu   | dxn   | dxe   |
+   | dx             | dxT   | dxU   | dxN   | dxE   |
    +----------------+-------+-------+-------+-------+
-   | dy             | dyt   | dyu   | dyn   | dye   |
+   | dy             | dyT   | dyU   | dyN   | dyE   |
    +----------------+-------+-------+-------+-------+
    | area           | tarea | uarea | narea | earea |
    +----------------+-------+-------+-------+-------+
@@ -893,9 +893,11 @@ Three namelist variables generally control model initialization, ``runtype``,
 are ``initial`` or ``continue``. When ``runtype`` = `continue`, the
 restart filename is stored in a small text (pointer) file,
 ``use_restart_time`` is forced to true and ``ice_ic`` plays no role. When ``runtype`` =
-`initial`, ``ice_ic`` has three options, ``none``, ``default``,
-or *filename*. These initial states are no-ice, latitudinal dependent
-ice, and ice defined by a file respectively. In `initial` mode,
+`initial`, ``ice_ic`` has three options, ``none``, ``internal``,
+or *filename*. These initial states are no-ice, a namelist-driven initial
+condition, and ice defined by a file, respectively. If ``ice_ic`` is set
+to ``internal``, the initial state is defined by the namelist values
+``ice_data_type``, ``ice_data_dist``, and ``ice_data_conc``.
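For example, a minimal sketch of such an internally generated initial state, using values that appear in the ``set_nml`` options earlier in this patch (illustrative only, not a complete ice_in namelist):

   &setup_nml
      ice_ic = 'internal'          ! build the initial state from the ice_data_* settings below
   /
   &forcing_nml
      ice_data_type = 'box2001'    ! ice mask for the box2001 test
      ice_data_conc = 'box2001'    ! concentration ramped from 0 to 1, west to east
      ice_data_dist = 'box2001'    ! spatial distribution for the box2001 test
   /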
In `initial` mode,
 ``use_restart_time`` should generally be set to false and the initial
 time is then defined by ``year_init``, ``month_init``, ``day_init``,
 and ``sec_init``. These combinations of options are summarized in
@@ -938,8 +940,8 @@ the model. If namelist defines the start date, it's done with
    | `initial`      | `none`                   | not used                             | no ice,                                |
    |                |                          |                                      | namelist defines start date            |
    +----------------+--------------------------+--------------------------------------+----------------------------------------+
-   | `initial`      | `default`                | not used                             | latitude dependent internal ic,        |
-   |                |                          |                                      | namelist defines start date            |
+   | `initial`      | `internal` or            | not used                             | set by namelist ice_data_type,         |
+   |                | `default`                |                                      | ice_data_dist, ice_data_conc           |
    +----------------+--------------------------+--------------------------------------+----------------------------------------+
    | `initial`      | *filename*               | false                                | read ice state from filename,          |
    |                |                          |                                      | namelist defines start date            |

From be8709bd7397289a26dec071df9605bb5e9ce38e Mon Sep 17 00:00:00 2001
From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com>
Date: Tue, 17 May 2022 21:46:01 +0000
Subject: [PATCH 2/4] Minor modif to doc: advice for value of ndte (#722)

---
 doc/source/master_list.bib                  | 12 ++++++++++++
 doc/source/user_guide/ug_implementation.rst |  5 +----
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib
index b563414d7..a2da9b9f8 100644
--- a/doc/source/master_list.bib
+++ b/doc/source/master_list.bib
@@ -1070,6 +1070,18 @@ @Article{Ringeisen21
   url = {https://doi.org/10.5194/tc-15-2873-2021}
 }

+@article{Bouchat22,
+author = {Bouchat, Amelie and Hutter, Nils and Chanut, Jerome and Dupont, Frederic and Dukhovskoy, Dmitry and Garric, Gilles and Lee, Younjoo J. and Lemieux, Jean-Francois and Lique, Camille and Losch, Martin and Maslowski, Wieslaw and Myers, Paul G. and Olason, Einar and Rampal, Pierre and Rasmussen, Till and Talandier, Claude and Tremblay, Bruno and Wang, Qiang},
+title = {Sea Ice Rheology Experiment (SIREx): 1. Scaling and Statistical Properties of Sea-Ice Deformation Fields},
+journal = {Journal of Geophysical Research: Oceans},
+volume = {127},
+number = {4},
+pages = {e2021JC017667},
+doi = {https://doi.org/10.1029/2021JC017667},
+url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2021JC017667},
+year = {2022}
+}
+
 @Article{Tsujino18,
   author = "H. Tsujino and S. Urakawa and R.J. Small and W.M. Kim and S.G. Yeager and et al.",
   title = "{JRA‐55 based surface dataset for driving ocean–sea‐ice models (JRA55‐do)}",

diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst
index 6f1a94a36..500209326 100644
--- a/doc/source/user_guide/ug_implementation.rst
+++ b/doc/source/user_guide/ug_implementation.rst
@@ -1040,10 +1040,7 @@ stable as long as the subcycling time step :math:`\Delta t_e`
 sufficiently resolves the damping timescale :math:`T`. For the
 stability analysis we had to make several simplifications of the
 problem; hence the location of the boundary between stable and unstable regions is
-merely an estimate. In practice, the ratio
-:math:`\Delta t_e ~:~ T ~:~ \Delta t`  = 1 : 40 : 120 provides both
-stability and acceptable efficiency for time steps (:math:`\Delta t`) on
-the order of 1 hour.
+merely an estimate. The current default parameters for the EVP and EAP are :math:`ndte=240` and :math:`E_\circ=0.36`.
For high-resolution applications, however, it is recommended to increase the value of :math:`ndte` :cite:`Koldunov19`, :cite:`Bouchat22`.
 Note that only :math:`T` and :math:`\Delta t_e` figure into the
 stability of the dynamics component; :math:`\Delta t` does not. Although

From 317b8623c36c85650233b8987222a6625aaa26f5 Mon Sep 17 00:00:00 2001
From: Philippe Blain
Date: Thu, 26 May 2022 12:56:03 -0400
Subject: [PATCH 3/4] scripts: unset executable bit for 'makdep.c' and 'cice.settings' (#723)

This allows tab-completion for './cice.s[TAB]' to unambiguously complete
to './cice.submit'. 'cice.settings' is designed to be sourced anyway, so
it makes little sense for it to be executable. While at it, also unset
the bit for 'makdep.c'.
---
 configuration/scripts/cice.settings | 0
 configuration/scripts/makdep.c      | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 mode change 100755 => 100644 configuration/scripts/cice.settings
 mode change 100755 => 100644 configuration/scripts/makdep.c

diff --git a/configuration/scripts/cice.settings b/configuration/scripts/cice.settings
old mode 100755
new mode 100644
diff --git a/configuration/scripts/makdep.c b/configuration/scripts/makdep.c
old mode 100755
new mode 100644

From c334aee2e27e27767a1758a0acf1a12fa69c8565 Mon Sep 17 00:00:00 2001
From: Philippe Blain
Date: Thu, 2 Jun 2022 11:26:36 -0400
Subject: [PATCH 4/4] machines: eccc: update our machine files (#725)

A few notes:

- We do need to request memory even on machines where we have exclusive
  node access. 20 GB was chosen rather arbitrarily.
- We set umask to 022 to make the .o and .e files readable by group and
  others.
- We use the minimal SSM package for the compiler and Intel MPI, but we
  keep the setup using environment modules commented in case we ever
  need to tweak things (e.g. I_MPI_LIBRARY_KIND).
- We set OMP_STACKSIZE. Since d1e972a (Update OMP (CICE-Consortium#680),
  2022-02-18), OpenMP threading is active in 'ice_transport_remap.F90',
  and the default OpenMP stack size needs to be adjusted to avoid stack
  overflows. We set it to 64 MB, as used for other machines.

Also, remove dead code setting 'CICE_ACCT'. This variable was last used
in 98e03077 (Update scripts, rename variables from CICE_ to ICE_ to be
more reusable in icepack., 2017-09-15), and so did not do anything for
any of the machines that were using it after that commit. Remove code in
machines env files that was setting it based on '~/.cice_proj'.

A few notes specific to 'gpsc3':

- Since we use an '--export' directive to choose which environment
  variables are exported to the job environment by SLURM, SSMUSE_BASE
  and SSMUSE_PATH are not present in the environment, and loading
  domains without their full paths fails on csh, so use a full path.
- We use the compiler package from main/opt instead of eccc/all/opt
  since we do not need the EC-specific variables to be set (and it also
  leads to job failures since BASE_ARCH is not defined).
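For reference, the environment settings these notes describe reduce to a few csh lines; this is a sketch using the values quoted above, and the actual machine files in the diff below are authoritative:

   # csh sketch of the job environment discussed in the notes above
   setenv OMP_STACKSIZE 64M   # larger per-thread stacks for the OpenMP loops in ice_transport_remap.F90
   umask 022                  # make the batch .o and .e files readable by group and others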
--- configuration/scripts/cice.batch.csh | 37 ++++++++++++++ configuration/scripts/cice.launch.csh | 37 ++++++++++++++ ...cros.millikan_intel => Macros.gpsc3_intel} | 33 +++++-------- ...{Macros.cesium_intel => Macros.ppp3_intel} | 33 +++++-------- .../scripts/machines/Macros.ppp5_intel | 49 +++++++++++++++++++ .../scripts/machines/Macros.ppp6_gnu | 44 +++++++++++++++++ .../scripts/machines/Macros.ppp6_gnu-impi | 44 +++++++++++++++++ .../scripts/machines/Macros.ppp6_intel | 49 +++++++++++++++++++ .../scripts/machines/Macros.ppp6_intel19 | 49 +++++++++++++++++++ .../scripts/machines/Macros.robert_intel | 49 +++++++++++++++++++ .../scripts/machines/Macros.underhill_intel | 49 +++++++++++++++++++ .../scripts/machines/env.cesium_intel | 30 ------------ configuration/scripts/machines/env.fram_intel | 5 -- .../scripts/machines/env.gpsc3_intel | 37 ++++++++++++++ .../scripts/machines/env.millikan_intel | 30 ------------ configuration/scripts/machines/env.ppp3_intel | 35 +++++++++++++ configuration/scripts/machines/env.ppp5_intel | 38 ++++++++++++++ configuration/scripts/machines/env.ppp6_gnu | 29 +++++++++++ .../scripts/machines/env.ppp6_gnu-impi | 38 ++++++++++++++ configuration/scripts/machines/env.ppp6_intel | 38 ++++++++++++++ .../scripts/machines/env.ppp6_intel19 | 39 +++++++++++++++ .../scripts/machines/env.robert_intel | 39 +++++++++++++++ .../scripts/machines/env.underhill_intel | 39 +++++++++++++++ 23 files changed, 761 insertions(+), 109 deletions(-) rename configuration/scripts/machines/{Macros.millikan_intel => Macros.gpsc3_intel} (55%) rename configuration/scripts/machines/{Macros.cesium_intel => Macros.ppp3_intel} (55%) create mode 100644 configuration/scripts/machines/Macros.ppp5_intel create mode 100644 configuration/scripts/machines/Macros.ppp6_gnu create mode 100644 configuration/scripts/machines/Macros.ppp6_gnu-impi create mode 100644 configuration/scripts/machines/Macros.ppp6_intel create mode 100644 configuration/scripts/machines/Macros.ppp6_intel19 create mode 100644 configuration/scripts/machines/Macros.robert_intel create mode 100644 configuration/scripts/machines/Macros.underhill_intel delete mode 100755 configuration/scripts/machines/env.cesium_intel create mode 100644 configuration/scripts/machines/env.gpsc3_intel delete mode 100755 configuration/scripts/machines/env.millikan_intel create mode 100644 configuration/scripts/machines/env.ppp3_intel create mode 100644 configuration/scripts/machines/env.ppp5_intel create mode 100644 configuration/scripts/machines/env.ppp6_gnu create mode 100644 configuration/scripts/machines/env.ppp6_gnu-impi create mode 100644 configuration/scripts/machines/env.ppp6_intel create mode 100644 configuration/scripts/machines/env.ppp6_intel19 create mode 100644 configuration/scripts/machines/env.robert_intel create mode 100644 configuration/scripts/machines/env.underhill_intel diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index 7a1334532..43ce00010 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -250,8 +250,45 @@ cat >> ${jobfile} << EOFB #PBS -j oe #PBS -l select=${nnodes}:ncpus=${corespernode}:mpiprocs=${taskpernodelimit}:ompthreads=${nthrds} #PBS -l walltime=${batchtime} +#PBS -W umask=022 EOFB +else if (${ICE_MACHINE} =~ robert* || ${ICE_MACHINE} =~ underhill* || ${ICE_MACHINE} =~ ppp6* || ${ICE_MACHINE} =~ ppp5*) then +cat >> ${jobfile} << EOFB +#PBS -N ${ICE_CASENAME} +#PBS -j oe +#PBS -l 
select=${nnodes}:ncpus=${corespernode}:mpiprocs=${taskpernodelimit}:ompthreads=${nthrds}:mem=20gb +#PBS -l walltime=${batchtime} +#PBS -W umask=022 +EOFB + +else if (${ICE_MACHINE} =~ ppp3*) then +cat >> ${jobfile} << EOFB +#PBS -N ${ICE_CASENAME} +#PBS -j oe +#PBS -l select=${nnodes}:ncpus=${corespernode}:mpiprocs=${taskpernodelimit}:ompthreads=${nthrds}:mem=20gb:res_tmpfs=1000:res_image=eccc/eccc_all_ppp_ubuntu-18.04-amd64_latest +#PBS -l walltime=${batchtime} +#PBS -W umask=022 +#PBS -q development +#PBS -o ${ICE_CASEDIR} +#PBS -S /bin/csh +EOFB + +else if (${ICE_MACHINE} =~ gpsc3*) then +cat >> ${jobfile} << EOFB +#SBATCH --export=USER,LOGNAME,HOME,MAIL,PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +#SBATCH -J ${ICE_CASENAME} +#SBATCH -A ${acct} +#SBATCH --partition ${queue} +#SBATCH --time ${batchtime} +#SBATCH --nodes ${nnodes} +#SBATCH --ntasks ${ntasks} +#SBATCH --cpus-per-task ${nthrds} +#SBATCH --mem-per-cpu=5G +#SBATCH --comment="image=eccc/eccc_all_default_ubuntu-18.04-amd64_latest" +EOFB + + else if (${ICE_MACHINE} =~ freya* ) then cat >> ${jobfile} << EOFB #PBS -N ${ICE_CASENAME} diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh index 9a557ec44..a4b6ca37d 100755 --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh @@ -159,6 +159,43 @@ aprun -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_F EOFR endif +#======= +else if (${ICE_MACHINE} =~ ppp5* || ${ICE_MACHINE} =~ ppp6* || ${ICE_MACHINE} =~ robert* || ${ICE_MACHINE} =~ underhill*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + +#======= +else if (${ICE_MACHINE} =~ ppp3*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +rumpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + +#======= +else if (${ICE_MACHINE} =~ gpsc3*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpirun -np ${ntasks} ./cice >&! 
\$ICE_RUNLOG_FILE +EOFR +endif + + #======= else if (${ICE_MACHINE} =~ freya*) then if (${ICE_COMMDIR} =~ serial*) then diff --git a/configuration/scripts/machines/Macros.millikan_intel b/configuration/scripts/machines/Macros.gpsc3_intel similarity index 55% rename from configuration/scripts/machines/Macros.millikan_intel rename to configuration/scripts/machines/Macros.gpsc3_intel index 4a3b21093..916996617 100644 --- a/configuration/scripts/machines/Macros.millikan_intel +++ b/configuration/scripts/machines/Macros.gpsc3_intel @@ -1,5 +1,5 @@ #============================================================================== -# Makefile macros for "millikan" +# Makefile macros for "gpsc3" #============================================================================== # For use with intel compiler #============================================================================== @@ -11,12 +11,12 @@ CFLAGS := -c -O2 -fp-model precise FIXEDFLAGS := -132 FREEFLAGS := -FR -FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -no-wrap-margin +FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -diag-disable 5140 -no-wrap-margin #-xHost -FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -check -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays + FFLAGS += -O0 -g -check -fpe0 -ftrapuv -fp-model except -check noarg_temp_created +#-init=snan,arrays # -heap-arrays 1024 else FFLAGS += -O2 @@ -36,25 +36,14 @@ else endif LD:= $(FC) -NETCDF_PATH := /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150/ubuntu-14.04-amd64-64/ - -PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs - -#PNETCDF_PATH := $(PNETCDF) -#PNETCDF_PATH := /glade/u/apps/ch/opt/pio/2.2/mpt/2.15f/intel/17.0.1/lib - -INCLDIR := $(INCLDIR) - -LIB_NETCDF := $(NETCDF_PATH)/lib -LIB_PNETCDF := $(PNETCDF_PATH)/lib -LIB_MPI := $(IMPILIBDIR) - -#SLIBS := -L$(LIB_NETCDF) -lnetcdff -lnetcdf -L$(LIB_PNETCDF) -lpnetcdf -lgptl -SLIBS := -L$(LIB_NETCDF) -lnetcdff -lnetcdf +ifeq ($(ICE_IOTYPE), netcdf) + INCLDIR += $(shell nf-config --fflags) + SLIBS := $(shell nf-config --flibs) +endif ifeq ($(ICE_THREADED), true) - LDFLAGS += -openmp - CFLAGS += -openmp - FFLAGS += -openmp + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp endif diff --git a/configuration/scripts/machines/Macros.cesium_intel b/configuration/scripts/machines/Macros.ppp3_intel similarity index 55% rename from configuration/scripts/machines/Macros.cesium_intel rename to configuration/scripts/machines/Macros.ppp3_intel index 2ad3ff1f3..143445478 100644 --- a/configuration/scripts/machines/Macros.cesium_intel +++ b/configuration/scripts/machines/Macros.ppp3_intel @@ -1,5 +1,5 @@ #============================================================================== -# Makefile macros for "cesium" +# Makefile macros for "ppp3" #============================================================================== # For use with intel compiler #============================================================================== @@ -11,12 +11,12 @@ CFLAGS := -c -O2 -fp-model precise FIXEDFLAGS := -132 FREEFLAGS := -FR -FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -no-wrap-margin +FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -diag-disable 5140 -no-wrap-margin #-xHost -FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -check -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays + FFLAGS += -O0 -g -check -fpe0 
-ftrapuv -fp-model except -check noarg_temp_created +#-init=snan,arrays # -heap-arrays 1024 else FFLAGS += -O2 @@ -36,25 +36,14 @@ else endif LD:= $(FC) -NETCDF_PATH := /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150/ubuntu-14.04-amd64-64/ - -PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs - -#PNETCDF_PATH := $(PNETCDF) -#PNETCDF_PATH := /glade/u/apps/ch/opt/pio/2.2/mpt/2.15f/intel/17.0.1/lib - -INCLDIR := $(INCLDIR) - -LIB_NETCDF := $(NETCDF_PATH)/lib -LIB_PNETCDF := $(PNETCDF_PATH)/lib -LIB_MPI := $(IMPILIBDIR) - -#SLIBS := -L$(LIB_NETCDF) -lnetcdff -lnetcdf -L$(LIB_PNETCDF) -lpnetcdf -lgptl -SLIBS := -L$(LIB_NETCDF) -lnetcdff -lnetcdf -llapack -lblas +ifeq ($(ICE_IOTYPE), netcdf) + INCLDIR += $(shell nf-config --fflags) + SLIBS := $(shell nf-config --flibs) +endif ifeq ($(ICE_THREADED), true) - LDFLAGS += -openmp - CFLAGS += -openmp - FFLAGS += -openmp + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp endif diff --git a/configuration/scripts/machines/Macros.ppp5_intel b/configuration/scripts/machines/Macros.ppp5_intel new file mode 100644 index 000000000..0a45703b9 --- /dev/null +++ b/configuration/scripts/machines/Macros.ppp5_intel @@ -0,0 +1,49 @@ +#============================================================================== +# Makefile macros for "ppp5" +#============================================================================== +# For use with intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise +#-xHost + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -diag-disable 5140 -no-wrap-margin +#-xHost + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -check nooutput_conversion -debug-parameters all +#-init=snan,arrays +# -heap-arrays 1024 +else + FFLAGS += -O2 +endif + +SCC := icc +SFC := ifort +MPICC := mpiicc +MPIFC := mpiifort + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +ifeq ($(ICE_IOTYPE), netcdf) + INCLDIR += $(shell nf-config --fflags) + SLIBS := $(shell nf-config --flibs) +endif + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + diff --git a/configuration/scripts/machines/Macros.ppp6_gnu b/configuration/scripts/machines/Macros.ppp6_gnu new file mode 100644 index 000000000..c55406d39 --- /dev/null +++ b/configuration/scripts/machines/Macros.ppp6_gnu @@ -0,0 +1,44 @@ +#============================================================================== +# Makefile macros for ECCC ppp6 +#============================================================================== +# For use with GNU compiler +#============================================================================== + +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 +#-xHost + +FREEFLAGS := -ffree-form +FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none +#-xHost + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow +else + FFLAGS += -O2 +endif + +SCC := gcc +SFC := gfortran +MPICC := mpicc +MPIFC := mpifort + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +ifeq ($(ICE_IOTYPE), netcdf) + INCLDIR += $(shell nf-config --fflags) + SLIBS 
:= $(shell nf-config --flibs) +endif + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -fopenmp + CFLAGS += -fopenmp + FFLAGS += -fopenmp +endif diff --git a/configuration/scripts/machines/Macros.ppp6_gnu-impi b/configuration/scripts/machines/Macros.ppp6_gnu-impi new file mode 100644 index 000000000..413154223 --- /dev/null +++ b/configuration/scripts/machines/Macros.ppp6_gnu-impi @@ -0,0 +1,44 @@ +#============================================================================== +# Makefile macros for ECCC ppp6 +#============================================================================== +# For use with GNU compiler +#============================================================================== + +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 +#-xHost + +FREEFLAGS := -ffree-form +FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none +#-xHost + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow +else + FFLAGS += -O2 +endif + +SCC := gcc +SFC := gfortran +MPICC := mpicc +MPIFC := mpif90 + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +ifeq ($(ICE_IOTYPE), netcdf) + INCLDIR += $(shell nf-config --fflags) + SLIBS := $(shell nf-config --flibs) +endif + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -fopenmp + CFLAGS += -fopenmp + FFLAGS += -fopenmp +endif diff --git a/configuration/scripts/machines/Macros.ppp6_intel b/configuration/scripts/machines/Macros.ppp6_intel new file mode 100644 index 000000000..ece398a49 --- /dev/null +++ b/configuration/scripts/machines/Macros.ppp6_intel @@ -0,0 +1,49 @@ +#============================================================================== +# Makefile macros for "ppp6" +#============================================================================== +# For use with intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise +#-xHost + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -diag-disable 5140 -no-wrap-margin +#-xHost + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -check nooutput_conversion -debug-parameters all +#-init=snan,arrays +# -heap-arrays 1024 +else + FFLAGS += -O2 +endif + +SCC := icc +SFC := ifort +MPICC := mpiicc +MPIFC := mpiifort + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +ifeq ($(ICE_IOTYPE), netcdf) + INCLDIR += $(shell nf-config --fflags) + SLIBS := $(shell nf-config --flibs) +endif + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + diff --git a/configuration/scripts/machines/Macros.ppp6_intel19 b/configuration/scripts/machines/Macros.ppp6_intel19 new file mode 100644 index 000000000..ece398a49 --- /dev/null +++ b/configuration/scripts/machines/Macros.ppp6_intel19 @@ -0,0 +1,49 @@ +#============================================================================== +# Makefile macros for "ppp6" +#============================================================================== +# For use with intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model 
precise +#-xHost + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -diag-disable 5140 -no-wrap-margin +#-xHost + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -check nooutput_conversion -debug-parameters all +#-init=snan,arrays +# -heap-arrays 1024 +else + FFLAGS += -O2 +endif + +SCC := icc +SFC := ifort +MPICC := mpiicc +MPIFC := mpiifort + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +ifeq ($(ICE_IOTYPE), netcdf) + INCLDIR += $(shell nf-config --fflags) + SLIBS := $(shell nf-config --flibs) +endif + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + diff --git a/configuration/scripts/machines/Macros.robert_intel b/configuration/scripts/machines/Macros.robert_intel new file mode 100644 index 000000000..e54a645c9 --- /dev/null +++ b/configuration/scripts/machines/Macros.robert_intel @@ -0,0 +1,49 @@ +#============================================================================== +# Makefile macros for "robert" +#============================================================================== +# For use with intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise +#-xHost + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -diag-disable 5140 -no-wrap-margin +#-xHost + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -check nooutput_conversion -debug-parameters all +#-init=snan,arrays +# -heap-arrays 1024 +else + FFLAGS += -O2 +endif + +SCC := icc +SFC := ifort +MPICC := mpiicc +MPIFC := mpiifort + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +ifeq ($(ICE_IOTYPE), netcdf) + INCLDIR += $(shell nf-config --fflags) + SLIBS := $(shell nf-config --flibs) +endif + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + diff --git a/configuration/scripts/machines/Macros.underhill_intel b/configuration/scripts/machines/Macros.underhill_intel new file mode 100644 index 000000000..70a889009 --- /dev/null +++ b/configuration/scripts/machines/Macros.underhill_intel @@ -0,0 +1,49 @@ +#============================================================================== +# Makefile macros for "underhill" +#============================================================================== +# For use with intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise +#-xHost + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model source -convert big_endian -assume byterecl -ftz -traceback -diag-disable 5140 -no-wrap-margin +#-xHost + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -check nooutput_conversion -debug-parameters all +#-init=snan,arrays +# -heap-arrays 1024 +else + FFLAGS += -O2 +endif + +SCC := icc +SFC := ifort +MPICC := mpiicc +MPIFC := mpiifort + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) 
+
+ifeq ($(ICE_IOTYPE), netcdf)
+   INCLDIR += $(shell nf-config --fflags)
+   SLIBS := $(shell nf-config --flibs)
+endif
+
+ifeq ($(ICE_THREADED), true)
+   LDFLAGS += -qopenmp
+   CFLAGS += -qopenmp
+   FFLAGS += -qopenmp
+endif
+
diff --git a/configuration/scripts/machines/env.cesium_intel b/configuration/scripts/machines/env.cesium_intel
deleted file mode 100755
index 329bdf32d..000000000
--- a/configuration/scripts/machines/env.cesium_intel
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/csh -f
-
-set ssmuse=/fs/ssm/main/env/20180430/all/bin/ssmuse-csh # package loader
-source $ssmuse -d /fs/ssm/main/opt/intelcomp/intelcomp-2016.1.156 # intel compiler
-source /fs/ssm/main/opt/intelcomp/intelcomp-2016.1.156/intelcomp_2016.1.156_multi/bin/compilervars.csh intel64 # should be sourced by above domain, but bug in csh script
-source $ssmuse -d /fs/ssm/main/opt/openmpi/openmpi-1.6.5/intelcomp-2016.1.156 # openmpi
-source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and openmpi)
-
-# May be needed for OpenMP memory
-#setenv OMP_STACKSIZE 64M
-
-setenv ICE_MACHINE_MACHNAME cesium
-setenv ICE_MACHINE_MACHINFO "cesium"
-setenv ICE_MACHINE_ENVNAME intel
-setenv ICE_MACHINE_ENVINFO "intel 2016.1.156, openmpi 1.6.5"
-setenv ICE_MACHINE_MAKE colormake-short
-setenv ICE_MACHINE_WKDIR /users/dor/afsg/phb/local/CICEDIRS/CICE_RUNS
-setenv ICE_MACHINE_INPUTDATA /users/dor/afsg/phb/local/FORCING
-setenv ICE_MACHINE_BASELINE /users/dor/afsg/phb/local/CICEDIRS/CICE_BASELINE
-setenv ICE_MACHINE_SUBMIT "qsub"
-setenv ICE_MACHINE_TPNODE 36
-setenv ICE_MACHINE_ACCT P0000000
-setenv ICE_MACHINE_QUEUE "debug"
-setenv ICE_MACHINE_BLDTHRDS 4
-setenv ICE_MACHINE_QSTAT "qstat "
-
-if (-e ~/.cice_proj) then
-  set account_name = `head -1 ~/.cice_proj`
-  setenv CICE_ACCT ${account_name}
-endif
diff --git a/configuration/scripts/machines/env.fram_intel b/configuration/scripts/machines/env.fram_intel
index 35de927e7..6ca8285bc 100755
--- a/configuration/scripts/machines/env.fram_intel
+++ b/configuration/scripts/machines/env.fram_intel
@@ -24,8 +24,3 @@ setenv ICE_MACHINE_TPNODE 36
 setenv ICE_MACHINE_ACCT P0000000
 setenv ICE_MACHINE_BLDTHRDS 1
 setenv ICE_MACHINE_QSTAT "qstat "
-
-if (-e ~/.cice_proj) then
-  set account_name = `head -1 ~/.cice_proj`
-  setenv CICE_ACCT ${account_name}
-endif
diff --git a/configuration/scripts/machines/env.gpsc3_intel b/configuration/scripts/machines/env.gpsc3_intel
new file mode 100644
index 000000000..2c8d49275
--- /dev/null
+++ b/configuration/scripts/machines/env.gpsc3_intel
@@ -0,0 +1,37 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# SSM package loader
+source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh >& /dev/null
+set ssmuse=`which ssmuse-csh`
+# Intel compiler
+source $ssmuse -d /fs/ssm/main/intelcomp/intelpsxe-cluster-19.0.3.199
+setenv FOR_DUMP_CORE_FILE 1
+# OpenMPI
+source $ssmuse -d /fs/ssm/hpco/exp/openmpi/openmpi-3.1.2--hpcx-2.4.0-mofed-4.6--csh--intel-19.0.3.199
+source $ssmuse -d /fs/ssm/main/opt/openmpi-setup/openmpi-setup-0.3
+# NetCDF
+source $ssmuse -d /fs/ssm/hpco/exp/hdf5-netcdf4/serial/static/intel-19.0.3.199/02
+
+endif
+
+setenv ICE_MACHINE_MACHNAME gpsc3
+setenv ICE_MACHINE_ENVNAME intel
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/site3/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /home/ords/cmdd/cmde/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/site3/cice/baseline/
+setenv ICE_MACHINE_SUBMIT "sbatch"
+setenv ICE_MACHINE_TPNODE 44
+setenv ICE_MACHINE_ACCT "eccc_cmdd"
+setenv ICE_MACHINE_QUEUE "standard"
+setenv ICE_MACHINE_BLDTHRDS 24
+setenv ICE_MACHINE_QSTAT "squeue"
+
+setenv SLURM_EXPORT_ENV "ALL"
diff --git a/configuration/scripts/machines/env.millikan_intel b/configuration/scripts/machines/env.millikan_intel
deleted file mode 100755
index 350ea5b6e..000000000
--- a/configuration/scripts/machines/env.millikan_intel
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/csh -f
-
-set ssmuse=/fs/ssm/main/env/20180430/all/bin/ssmuse-csh # package loader
-source $ssmuse -d /fs/ssm/main/opt/intelcomp/intelcomp-2016.1.156 # intel compiler
-source /fs/ssm/main/opt/intelcomp/intelcomp-2016.1.156/intelcomp_2016.1.156_multi/bin/compilervars.csh intel64 # should be sourced by above domain, but bug in csh script
-source $ssmuse -d /fs/ssm/main/opt/openmpi/openmpi-1.6.5/intelcomp-2016.1.156 # openmpi
-source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and openmpi)
-
-# May be needed for OpenMP memory
-#setenv OMP_STACKSIZE 64M
-
-setenv ICE_MACHINE_MACHNAME millikan
-setenv ICE_MACHINE_MACHINFO "millikan"
-setenv ICE_MACHINE_ENVNAME intel
-setenv ICE_MACHINE_ENVINFO "intel 2016.1.156, openmpi 1.6.5, netcdf"
-setenv ICE_MACHINE_MAKE make
-setenv ICE_MACHINE_WKDIR /users/dor/armn/amb/data/local/runs
-setenv ICE_MACHINE_INPUTDATA /users/dor/armn/amb/data/local/forcing
-setenv ICE_MACHINE_BASELINE /users/dor/armn/amb/data/local/baseline
-setenv ICE_MACHINE_SUBMIT "qsub"
-setenv ICE_MACHINE_TPNODE 36
-setenv ICE_MACHINE_ACCT P0000000
-setenv ICE_MACHINE_QUEUE "debug"
-setenv ICE_MACHINE_BLDTHRDS 4
-setenv ICE_MACHINE_QSTAT "qstat "
-
-if (-e ~/.cice_proj) then
-  set account_name = `head -1 ~/.cice_proj`
-  setenv CICE_ACCT ${account_name}
-endif
diff --git a/configuration/scripts/machines/env.ppp3_intel b/configuration/scripts/machines/env.ppp3_intel
new file mode 100644
index 000000000..cf9fbe4a7
--- /dev/null
+++ b/configuration/scripts/machines/env.ppp3_intel
@@ -0,0 +1,35 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# SSM package loader
+source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh >& /dev/null
+set ssmuse=`which ssmuse-csh`
+# Intel compiler
+source $ssmuse -d comm/eccc/all/opt/intelcomp/intelpsxe-cluster-19.0.3.199
+setenv FOR_DUMP_CORE_FILE 1
+# OpenMPI
+source $ssmuse -d /fs/ssm/hpco/exp/openmpi/openmpi-3.1.2--hpcx-2.4.0-mofed-4.6--csh--intel-19.0.3.199
+source $ssmuse -d main/opt/openmpi-setup/openmpi-setup-0.3
+# NetCDF
+source $ssmuse -d hpco/exp/hdf5-netcdf4/serial/static/intel-19.0.3.199/02
+
+endif
+
+setenv ICE_MACHINE_MACHNAME ppp3
+setenv ICE_MACHINE_ENVNAME intel
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/site3/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /home/ords/cmdd/cmde/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/site3/cice/baseline/
+setenv ICE_MACHINE_SUBMIT "jobsub"
+setenv ICE_MACHINE_TPNODE 36
+setenv ICE_MACHINE_ACCT P0000000
+setenv ICE_MACHINE_QUEUE "development"
+setenv ICE_MACHINE_BLDTHRDS 24
+setenv ICE_MACHINE_QSTAT "jobctl-qstat "
diff --git a/configuration/scripts/machines/env.ppp5_intel b/configuration/scripts/machines/env.ppp5_intel
new file mode 100644
index 000000000..79dbf2a1b
--- /dev/null
+++ b/configuration/scripts/machines/env.ppp5_intel
@@ -0,0 +1,38 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# Package loader
+source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh >& /dev/null
+set ssmuse=`which ssmuse-csh`
+# Intel compiler + Intel MPI
+source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+mkl
+# module use /home/sice500/modulefiles
+# module load -s icc mpi
+setenv FOR_DUMP_CORE_FILE 1
+setenv I_MPI_DEBUG_COREDUMP 1
+# NetCDF
+source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01
+
+# OpenMP
+setenv OMP_STACKSIZE 64M
+
+endif
+
+setenv ICE_MACHINE_MACHNAME ppp5
+setenv ICE_MACHINE_ENVNAME intel
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/ppp5/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /space/hall5/sitestore/eccc/cmd/e/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/ppp5/cice/baselines/
+setenv ICE_MACHINE_SUBMIT qsub
+setenv ICE_MACHINE_TPNODE 80
+setenv ICE_MACHINE_ACCT unused
+setenv ICE_MACHINE_QUEUE development
+setenv ICE_MACHINE_BLDTHRDS 40
+setenv ICE_MACHINE_QSTAT qstat
diff --git a/configuration/scripts/machines/env.ppp6_gnu b/configuration/scripts/machines/env.ppp6_gnu
new file mode 100644
index 000000000..39cc27740
--- /dev/null
+++ b/configuration/scripts/machines/env.ppp6_gnu
@@ -0,0 +1,29 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# OpenMPI
+source /usr/mpi/gcc/openmpi-4.1.2a1/bin/mpivars.csh
+
+# OpenMP
+setenv OMP_STACKSIZE 64M
+
+endif
+
+setenv ICE_MACHINE_MACHNAME ppp6
+setenv ICE_MACHINE_ENVNAME gnu
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baseline/
+setenv ICE_MACHINE_SUBMIT qsub
+setenv ICE_MACHINE_TPNODE 80
+setenv ICE_MACHINE_ACCT unused
+setenv ICE_MACHINE_QUEUE development
+setenv ICE_MACHINE_BLDTHRDS 40
+setenv ICE_MACHINE_QSTAT qstat
diff --git a/configuration/scripts/machines/env.ppp6_gnu-impi b/configuration/scripts/machines/env.ppp6_gnu-impi
new file mode 100644
index 000000000..f2a523bf1
--- /dev/null
+++ b/configuration/scripts/machines/env.ppp6_gnu-impi
@@ -0,0 +1,38 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# package loader
+source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh >& /dev/null
+set ssmuse=`which ssmuse-csh`
+# GNU compilers + Intel MPI
+source $ssmuse -x /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2
+setenv I_MPI_DEBUG_COREDUMP 1
+setenv I_MPI_F77 gfortran
+setenv I_MPI_F90 gfortran
+setenv I_MPI_FC gfortran
+setenv I_MPI_CC gcc
+setenv I_MPI_CXX g++
+
+# OpenMP
+setenv OMP_STACKSIZE 64M
+
+endif
+
+setenv ICE_MACHINE_MACHNAME ppp6
+setenv ICE_MACHINE_ENVNAME gnu-impi
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baseline/
+setenv ICE_MACHINE_SUBMIT qsub
+setenv ICE_MACHINE_TPNODE 80
+setenv ICE_MACHINE_ACCT unused
+setenv ICE_MACHINE_QUEUE development
+setenv ICE_MACHINE_BLDTHRDS 40
+setenv ICE_MACHINE_QSTAT qstat
diff --git a/configuration/scripts/machines/env.ppp6_intel b/configuration/scripts/machines/env.ppp6_intel
new file mode 100644
index 000000000..dfaeb855f
--- /dev/null
+++ b/configuration/scripts/machines/env.ppp6_intel
@@ -0,0 +1,38 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# Package loader
+source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh >& /dev/null
+set ssmuse=`which ssmuse-csh`
+# Intel compiler + Intel MPI
+source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+mkl
+# module use /home/sice500/modulefiles
+# module load -s icc mpi
+setenv FOR_DUMP_CORE_FILE 1
+setenv I_MPI_DEBUG_COREDUMP 1
+# NetCDF
+source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01
+
+# OpenMP
+setenv OMP_STACKSIZE 64M
+
+endif
+
+setenv ICE_MACHINE_MACHNAME ppp6
+setenv ICE_MACHINE_ENVNAME intel
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/ppp6/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/ppp6/cice/baselines/
+setenv ICE_MACHINE_SUBMIT qsub
+setenv ICE_MACHINE_TPNODE 80
+setenv ICE_MACHINE_ACCT unused
+setenv ICE_MACHINE_QUEUE development
+setenv ICE_MACHINE_BLDTHRDS 40
+setenv ICE_MACHINE_QSTAT qstat
diff --git a/configuration/scripts/machines/env.ppp6_intel19 b/configuration/scripts/machines/env.ppp6_intel19
new file mode 100644
index 000000000..d41242630
--- /dev/null
+++ b/configuration/scripts/machines/env.ppp6_intel19
@@ -0,0 +1,39 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# package loader
+source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh >& /dev/null
+set ssmuse=`which ssmuse-csh`
+# Intel compiler
+source $ssmuse -d /fs/ssm/main/opt/intelcomp/intelpsxe-cluster-19.0.3.199
+setenv FOR_DUMP_CORE_FILE 1
+# Intel MPI
+source $ssmuse -d /fs/ssm/hpco/exp/intelpsxe-impi-19.0.3.199
+setenv FI_PROVIDER verbs
+setenv I_MPI_DEBUG_COREDUMP 1
+# NetCDF
+source $ssmuse -d hpco/exp/hdf5-netcdf4/serial/static/intel-19.0.3.199/02
+
+# OpenMP
+setenv OMP_STACKSIZE 64M
+
+endif
+
+setenv ICE_MACHINE_MACHNAME ppp6
+setenv ICE_MACHINE_ENVNAME intel19
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baseline/
+setenv ICE_MACHINE_SUBMIT "qsub"
+setenv ICE_MACHINE_TPNODE 80
+setenv ICE_MACHINE_ACCT P0000000
+setenv ICE_MACHINE_QUEUE "debug"
+setenv ICE_MACHINE_BLDTHRDS 24
+setenv ICE_MACHINE_QSTAT "qstat "
diff --git a/configuration/scripts/machines/env.robert_intel b/configuration/scripts/machines/env.robert_intel
new file mode 100644
index 000000000..43c11a529
--- /dev/null
+++ b/configuration/scripts/machines/env.robert_intel
@@ -0,0 +1,39 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# Package loader
+source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh >& /dev/null
+set ssmuse=`which ssmuse-csh`
+# Intel compiler + Intel MPI
+source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+mkl
+# module use /home/sice500/modulefiles
+# module load -s icc mpi
+setenv FOR_DUMP_CORE_FILE 1
+setenv I_MPI_DEBUG_COREDUMP 1
+# NetCDF
+source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01
+
+# OpenMP
+setenv OMP_STACKSIZE 64M
+
+endif
+
+setenv ICE_MACHINE_MACHNAME robert
+setenv ICE_MACHINE_ENVNAME intel
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/robert/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/robert/cice/baselines/
+setenv ICE_MACHINE_SUBMIT qsub
+setenv ICE_MACHINE_TPNODE 80
+setenv ICE_MACHINE_MAXRUNLENGTH 3
+setenv ICE_MACHINE_ACCT unused
+setenv ICE_MACHINE_QUEUE development
+setenv ICE_MACHINE_BLDTHRDS 40
+setenv ICE_MACHINE_QSTAT qstat
diff --git a/configuration/scripts/machines/env.underhill_intel b/configuration/scripts/machines/env.underhill_intel
new file mode 100644
index 000000000..90192853e
--- /dev/null
+++ b/configuration/scripts/machines/env.underhill_intel
@@ -0,0 +1,39 @@
+#!/bin/csh -f
+
+set inp = "undefined"
+if ($#argv == 1) then
+  set inp = $1
+endif
+
+if ("$inp" != "-nomodules") then
+
+# Package loader
+source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh >& /dev/null
+set ssmuse=`which ssmuse-csh`
+# Intel compiler + Intel MPI
+source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+mkl
+# module use /home/sice500/modulefiles
+# module load -s icc mpi
+setenv FOR_DUMP_CORE_FILE 1
+setenv I_MPI_DEBUG_COREDUMP 1
+# NetCDF
+source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01
+
+# OpenMP
+setenv OMP_STACKSIZE 64M
+
+endif
+
+setenv ICE_MACHINE_MACHNAME underhill
+setenv ICE_MACHINE_ENVNAME intel
+setenv ICE_MACHINE_MAKE make
+setenv ICE_MACHINE_WKDIR ~/data/underhill/cice/runs/
+setenv ICE_MACHINE_INPUTDATA /space/hall5/sitestore/eccc/cmd/e/sice500/
+setenv ICE_MACHINE_BASELINE ~/data/underhill/cice/baselines/
+setenv ICE_MACHINE_SUBMIT qsub
+setenv ICE_MACHINE_MAXRUNLENGTH 3
+setenv ICE_MACHINE_TPNODE 80
+setenv ICE_MACHINE_ACCT unused
+setenv ICE_MACHINE_QUEUE development
+setenv ICE_MACHINE_BLDTHRDS 40
+setenv ICE_MACHINE_QSTAT qstat