From 380f22f7b357d50a9744c41ee00c407a40172897 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Mon, 16 Jan 2017 15:47:51 -0700 Subject: [PATCH 001/219] first set of commits for doing glc time averaging at different frequency than coupling --- .../mct/cime_config/config_component_acme.xml | 11 + .../mct/cime_config/config_component_cesm.xml | 11 + src/drivers/mct/main/cesm_comp_mod.F90 | 70 +++--- src/drivers/mct/shr/seq_timemgr_mod.F90 | 200 ++++++++---------- 4 files changed, 159 insertions(+), 133 deletions(-) diff --git a/src/drivers/mct/cime_config/config_component_acme.xml b/src/drivers/mct/cime_config/config_component_acme.xml index 5fa62d08e9fd..9885951e3841 100644 --- a/src/drivers/mct/cime_config/config_component_acme.xml +++ b/src/drivers/mct/cime_config/config_component_acme.xml @@ -360,6 +360,17 @@ Number of glc coupling intervals per NCPL_BASE_PERIOD. + + integer + 1 + + 1 + + run_coupling + env_run.xml + Number of glc coupling intervals relative to NCPL_BASE_PERIOD for averaging of GLC input. + + integer 8 diff --git a/src/drivers/mct/cime_config/config_component_cesm.xml b/src/drivers/mct/cime_config/config_component_cesm.xml index fbad2fde32f2..2b40a2a27b3f 100644 --- a/src/drivers/mct/cime_config/config_component_cesm.xml +++ b/src/drivers/mct/cime_config/config_component_cesm.xml @@ -200,6 +200,17 @@ Number of glc coupling intervals per NCPL_BASE_PERIOD. + + integer + 1 + + 1 + + run_coupling + env_run.xml + Number of glc coupling intervals relative to NCPL_BASE_PERIOD for averaging of GLC input. + + integer 8 diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index afc6b863ff4f..ed85fc8b7239 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -91,6 +91,7 @@ module cesm_comp_mod use seq_timemgr_mod, only: seq_timemgr_alarm_ocnrun use seq_timemgr_mod, only: seq_timemgr_alarm_icerun use seq_timemgr_mod, only: seq_timemgr_alarm_glcrun + use seq_timemgr_mod, only: seq_timemgr_alarm_glcrun_avg use seq_timemgr_mod, only: seq_timemgr_alarm_ocnnext use seq_timemgr_mod, only: seq_timemgr_alarm_tprof use seq_timemgr_mod, only: seq_timemgr_alarm_histavg @@ -252,6 +253,7 @@ module cesm_comp_mod logical :: ocnrun_alarm ! ocn run alarm logical :: ocnnext_alarm ! ocn run alarm on next timestep logical :: glcrun_alarm ! glc run alarm + logical :: glcrun_avg_alarm ! glc run averaging alarm logical :: rofrun_alarm ! rof run alarm logical :: wavrun_alarm ! wav run alarm logical :: esprun_alarm ! esp run alarm @@ -2117,6 +2119,11 @@ subroutine cesm_run() ! (this is time that models should have before they return ! to the driver). Write timestamp and run alarm status !---------------------------------------------------------- + ! Note that the glcrun_avg_alarm just controls what is passed to glc in terms + ! of averaged fields - it does NOT control when glc is called currently - + ! glc will be called on the glcrun_alarm setting - but it might not be passed relevant + ! info if the time averaging period to accumulate information passed to glc is greater + ! 
than the glcrun interval call seq_timemgr_clockAdvance( seq_SyncClock, force_stop, force_stop_ymd, force_stop_tod) call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd, curr_tod=tod ) @@ -2127,6 +2134,7 @@ subroutine cesm_run() rofrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_rofrun) icerun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_icerun) glcrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_glcrun) + glcrun_avg_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_glcrun_avg) wavrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_wavrun) esprun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_esprun) ocnrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_ocnrun) @@ -2198,7 +2206,7 @@ subroutine cesm_run() if (tod == 0) t24hr_alarm = .true. if (month==1 .and. day==1 .and. tod==0) t1yr_alarm = .true. - call seq_infodata_putData(infodata, glcrun_alarm=glcrun_alarm) + call seq_infodata_putData(infodata, glcrun_alarm=glcrun_alarm) !??? TODO - why is this here ??? if (seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_datestop)) then if (iamroot_CPLID) then @@ -2915,32 +2923,42 @@ subroutine cesm_run() !| GLC SETUP-SEND !---------------------------------------------------------- - if (glc_present .and. glcrun_alarm) then - - !---------------------------------------------------- - !| glc prep-merge - !---------------------------------------------------- - - if (iamin_CPLID .and. glc_prognostic) then - call cesm_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') - call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (lnd_c2_glc) then - call prep_glc_accum_avg(timer='CPL:glcprep_avg') - - ! Note that l2x_gx is obtained from mapping the module variable l2gacc_lx - call prep_glc_calc_l2x_gx(fractions_lx, timer='CPL:glcprep_lnd2glc') - - call prep_glc_mrg(infodata, fractions_gx, timer_mrg='CPL:glcprep_mrgx2g') + ! NOTE - only create appropriate input to glc if the avg_alarm is on + if (glc_present) then + if (glcrun_avg_alarm) then + !---------------------------------------------------- + !| glc prep-merge + !---------------------------------------------------- + + if (iamin_CPLID .and. glc_prognostic) then + call cesm_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') + call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) + if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) + + if (lnd_c2_glc) then + call prep_glc_accum_avg(timer='CPL:glcprep_avg') + + ! Note that l2x_gx is obtained from mapping the module variable l2gacc_lx + call prep_glc_calc_l2x_gx(fractions_lx, timer='CPL:glcprep_lnd2glc') + + call prep_glc_mrg(infodata, fractions_gx, timer_mrg='CPL:glcprep_mrgx2g') + + call component_diag(infodata, glc, flow='x2c', comment='send glc', & + info_debug=info_debug, timer_diag='CPL:glcprep_diagav') + endif + + if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) + call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) - call component_diag(infodata, glc, flow='x2c', comment='send glc', & - info_debug=info_debug, timer_diag='CPL:glcprep_diagav') + ! Set seq_infodata flag for valid data + call seq_infodata_PutData(glc_valid_input=.true.) endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) - endif + else + if (iamin_CPLID .and. glc_prognostic) then + ! 
Set seq_infodata flag for unvalid data + call seq_infodata_PutData(glc_valid_input=.false.) + end if + end if !---------------------------------------------------- !| cpl -> glc @@ -3410,7 +3428,7 @@ subroutine cesm_run() !| GLC RECV-POST !---------------------------------------------------------- - if (glc_present .and. glcrun_alarm) then + if (glc_present .and. glcrun_avg_alarm) then !---------------------------------------------------------- !| glc -> cpl diff --git a/src/drivers/mct/shr/seq_timemgr_mod.F90 b/src/drivers/mct/shr/seq_timemgr_mod.F90 index 8a297ce9e048..574dd1475acf 100644 --- a/src/drivers/mct/shr/seq_timemgr_mod.F90 +++ b/src/drivers/mct/shr/seq_timemgr_mod.F90 @@ -1,8 +1,4 @@ !=============================================================================== -! SVN $Id: seq_timemgr_mod.F90 68253 2015-02-18 22:24:57Z mvertens $ -! SVN $URL: https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_1_15/shr/seq_timemgr_mod.F90 $ -!=============================================================================== -!BOP =========================================================================== ! ! !MODULE: seq_timemgr_mod --- Time-manager module ! @@ -104,6 +100,7 @@ module seq_timemgr_mod ! seq_timemgr_alarm_ocnrun ! seq_timemgr_alarm_icerun ! seq_timemgr_alarm_glcrun +! seq_timemgr_alarm_glcrun_avg ! seq_timemgr_alarm_wavrun ! seq_timemgr_alarm_esprun ! seq_timemgr_alarm_ocnnext @@ -112,8 +109,6 @@ module seq_timemgr_mod ! seq_timemgr_alarm_pause ! seq_timemgr_alarm_barrier -!EOP - private:: seq_timemgr_alarmGet private:: seq_timemgr_alarmInit private:: seq_timemgr_EClockInit @@ -153,6 +148,7 @@ module seq_timemgr_mod seq_timemgr_nclock_wav = 7, & seq_timemgr_nclock_rof = 8, & seq_timemgr_nclock_esp = 9 + integer(SHR_KIND_IN),private,parameter :: max_clocks = 9 character(len=*),public,parameter :: & seq_timemgr_clock_drv = 'seq_timemgr_clock_drv' , & @@ -168,49 +164,54 @@ module seq_timemgr_mod (/'drv ','atm ','lnd ','ocn ', & 'ice ','glc ','wav ','rof ','esp '/) + ! Alarms on both component clocks and driver clock integer(SHR_KIND_IN),private,parameter :: & - seq_timemgr_nalarm_restart = 1, & - seq_timemgr_nalarm_run = 2, & - seq_timemgr_nalarm_stop = 3, & - seq_timemgr_nalarm_datestop= 4, & - seq_timemgr_nalarm_history = 5, & - seq_timemgr_nalarm_atmrun = 6, & - seq_timemgr_nalarm_lndrun = 7, & - seq_timemgr_nalarm_ocnrun = 8, & - seq_timemgr_nalarm_icerun = 9, & - seq_timemgr_nalarm_glcrun =10, & - seq_timemgr_nalarm_ocnnext =11, & - seq_timemgr_nalarm_tprof =12, & - seq_timemgr_nalarm_histavg =13, & - seq_timemgr_nalarm_rofrun =14, & - seq_timemgr_nalarm_wavrun =15, & - seq_timemgr_nalarm_esprun =16, & - seq_timemgr_nalarm_pause =17, & - seq_timemgr_nalarm_barrier =18, & + seq_timemgr_nalarm_restart = 1 , & ! driver and component clock alarm + seq_timemgr_nalarm_run = 2 , & ! driver and component clock alarm + seq_timemgr_nalarm_stop = 3 , & ! driver and component clock alarm + seq_timemgr_nalarm_datestop = 4 , & ! driver and component clock alarm + seq_timemgr_nalarm_history = 5 , & ! driver and component clock alarm + seq_timemgr_nalarm_atmrun = 6 , & ! driver only clock alarm + seq_timemgr_nalarm_lndrun = 7 , & ! driver only clock alarm + seq_timemgr_nalarm_ocnrun = 8 , & ! driver only clock alarm + seq_timemgr_nalarm_icerun = 9 , & ! driver only clock alarm + seq_timemgr_nalarm_glcrun =10 , & ! driver only clock alarm + seq_timemgr_nalarm_glcrun_avg =11 , & ! driver only clock alarm + seq_timemgr_nalarm_ocnnext =12 , & ! 
driver only clock alarm + seq_timemgr_nalarm_tprof =13 , & ! driver and component clock alarm + seq_timemgr_nalarm_histavg =14 , & ! driver and component clock alarm + seq_timemgr_nalarm_rofrun =15 , & ! driver only clock alarm + seq_timemgr_nalarm_wavrun =16 , & ! driver only clock alarm + seq_timemgr_nalarm_esprun =17 , & ! driver only clock alarm + seq_timemgr_nalarm_pause =18 , & + seq_timemgr_nalarm_barrier =19 , & ! driver and component clock alarm max_alarms = seq_timemgr_nalarm_barrier + character(len=*),public,parameter :: & - seq_timemgr_alarm_restart = 'seq_timemgr_alarm_restart ', & - seq_timemgr_alarm_run = 'seq_timemgr_alarm_run ', & - seq_timemgr_alarm_stop = 'seq_timemgr_alarm_stop ', & - seq_timemgr_alarm_datestop= 'seq_timemgr_alarm_datestop', & - seq_timemgr_alarm_history = 'seq_timemgr_alarm_history ', & - seq_timemgr_alarm_atmrun = 'seq_timemgr_alarm_atmrun ', & - seq_timemgr_alarm_lndrun = 'seq_timemgr_alarm_lndrun ', & - seq_timemgr_alarm_ocnrun = 'seq_timemgr_alarm_ocnrun ', & - seq_timemgr_alarm_icerun = 'seq_timemgr_alarm_icerun ', & - seq_timemgr_alarm_glcrun = 'seq_timemgr_alarm_glcrun ', & - seq_timemgr_alarm_ocnnext = 'seq_timemgr_alarm_ocnnext ', & - seq_timemgr_alarm_tprof = 'seq_timemgr_alarm_tprof ', & - seq_timemgr_alarm_histavg = 'seq_timemgr_alarm_histavg ', & - seq_timemgr_alarm_rofrun = 'seq_timemgr_alarm_rofrun ', & - seq_timemgr_alarm_wavrun = 'seq_timemgr_alarm_wavrun ', & - seq_timemgr_alarm_esprun = 'seq_timemgr_alarm_esprun ', & - seq_timemgr_alarm_pause = 'seq_timemgr_alarm_pause ', & - seq_timemgr_alarm_barrier = 'seq_timemgr_alarm_barrier ' + seq_timemgr_alarm_restart = 'seq_timemgr_alarm_restart ', & + seq_timemgr_alarm_run = 'seq_timemgr_alarm_run ', & + seq_timemgr_alarm_stop = 'seq_timemgr_alarm_stop ', & + seq_timemgr_alarm_datestop = 'seq_timemgr_alarm_datestop', & + seq_timemgr_alarm_history = 'seq_timemgr_alarm_history ', & + seq_timemgr_alarm_atmrun = 'seq_timemgr_alarm_atmrun ', & + seq_timemgr_alarm_lndrun = 'seq_timemgr_alarm_lndrun ', & + seq_timemgr_alarm_ocnrun = 'seq_timemgr_alarm_ocnrun ', & + seq_timemgr_alarm_icerun = 'seq_timemgr_alarm_icerun ', & + seq_timemgr_alarm_glcrun = 'seq_timemgr_alarm_glcrun ', & + seq_timemgr_alarm_glcrun_avg = 'seq_timemgr_alarm_glcrun_avg' , & + seq_timemgr_alarm_ocnnext = 'seq_timemgr_alarm_ocnnext ', & + seq_timemgr_alarm_tprof = 'seq_timemgr_alarm_tprof ', & + seq_timemgr_alarm_histavg = 'seq_timemgr_alarm_histavg ', & + seq_timemgr_alarm_rofrun = 'seq_timemgr_alarm_rofrun ', & + seq_timemgr_alarm_wavrun = 'seq_timemgr_alarm_wavrun ', & + seq_timemgr_alarm_esprun = 'seq_timemgr_alarm_esprun ', & + seq_timemgr_alarm_pause = 'seq_timemgr_alarm_pause ', & + seq_timemgr_alarm_barrier = 'seq_timemgr_alarm_barrier ' type EClock_pointer ! needed for array of pointers type(ESMF_Clock),pointer :: EClock => null() end type EClock_pointer + type seq_timemgr_type private type(EClock_pointer) :: ECP(max_clocks) ! ESMF clocks, array of pointers @@ -229,7 +230,6 @@ module seq_timemgr_mod contains -!=============================================================================== !=============================================================================== ! !IROUTINE: seq_timemgr_clockInit -- Initializes clocks ! @@ -269,7 +269,6 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi type(ESMF_clock),target, intent(IN) :: EClock_wav ! wav clock type(ESMF_clock),target, intent(IN) :: EClock_esp ! 
esp clock type(file_desc_t) :: pioid -!EOP !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_clockInit) ' @@ -360,15 +359,15 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi ! Notes: !------------------------------------------------------------------------------- - SyncClock%ECP(seq_timemgr_nclock_drv)%EClock => EClock_drv - SyncClock%ECP(seq_timemgr_nclock_atm)%EClock => EClock_atm - SyncClock%ECP(seq_timemgr_nclock_lnd)%EClock => EClock_lnd - SyncClock%ECP(seq_timemgr_nclock_ocn)%EClock => EClock_ocn - SyncClock%ECP(seq_timemgr_nclock_ice)%EClock => EClock_ice - SyncClock%ECP(seq_timemgr_nclock_glc)%EClock => EClock_glc - SyncClock%ECP(seq_timemgr_nclock_rof)%EClock => EClock_rof - SyncClock%ECP(seq_timemgr_nclock_wav)%EClock => EClock_wav - SyncClock%ECP(seq_timemgr_nclock_esp)%EClock => EClock_esp + SyncClock%ECP(seq_timemgr_nclock_drv)%EClock => EClock_drv + SyncClock%ECP(seq_timemgr_nclock_atm)%EClock => EClock_atm + SyncClock%ECP(seq_timemgr_nclock_lnd)%EClock => EClock_lnd + SyncClock%ECP(seq_timemgr_nclock_ocn)%EClock => EClock_ocn + SyncClock%ECP(seq_timemgr_nclock_ice)%EClock => EClock_ice + SyncClock%ECP(seq_timemgr_nclock_glc)%EClock => EClock_glc + SyncClock%ECP(seq_timemgr_nclock_rof)%EClock => EClock_rof + SyncClock%ECP(seq_timemgr_nclock_wav)%EClock => EClock_wav + SyncClock%ECP(seq_timemgr_nclock_esp)%EClock => EClock_esp call mpi_comm_rank(mpicom,iam,ierr) @@ -481,6 +480,11 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi if (wav_cpl_dt == 0) wav_cpl_dt = atm_cpl_dt ! Copy atm coupling time into wav if (esp_cpl_dt == 0) esp_cpl_dt = atm_cpl_dt ! Copy atm coupling time into esp + if (glc_cpl_avg_dt == 0) then + ! set default average coupling interval + glc_cpl_avg_dt = glc_cpl_dt + end if + if ( ref_ymd == 0 ) then ref_ymd = start_ymd ref_tod = start_tod @@ -555,6 +559,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi write(logunit,F0I) trim(subname),' ice_cpl_dt = ',ice_cpl_dt write(logunit,F0I) trim(subname),' ocn_cpl_dt = ',ocn_cpl_dt write(logunit,F0I) trim(subname),' glc_cpl_dt = ',glc_cpl_dt + write(logunit,F0I) trim(subname),' glc_cpl_avg_dt = ',glc_cpl_avg_dt write(logunit,F0I) trim(subname),' rof_cpl_dt = ',rof_cpl_dt write(logunit,F0I) trim(subname),' wav_cpl_dt = ',wav_cpl_dt write(logunit,F0I) trim(subname),' esp_cpl_dt = ',esp_cpl_dt @@ -644,6 +649,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi call shr_mpi_bcast( ice_cpl_dt, mpicom ) call shr_mpi_bcast( ocn_cpl_dt, mpicom ) call shr_mpi_bcast( glc_cpl_dt, mpicom ) + call shr_mpi_bcast( glc_cpl_avg_dt, mpicom ) call shr_mpi_bcast( rof_cpl_dt, mpicom ) call shr_mpi_bcast( wav_cpl_dt, mpicom ) call shr_mpi_bcast( esp_cpl_dt, mpicom ) @@ -706,17 +712,18 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi call seq_timemgr_ETimeInit( RefTime , ref_ymd , ref_tod , "Reference date" ) call seq_timemgr_ETimeInit( CurrTime , curr_ymd , curr_tod , "Current date") - ! --- Figure out what CCSM time-stepping interval should be. --------------- + ! --- Figure out what time-stepping interval should be. 
--------------- dtime = 0 - dtime(seq_timemgr_nclock_atm ) = atm_cpl_dt - dtime(seq_timemgr_nclock_lnd ) = lnd_cpl_dt - dtime(seq_timemgr_nclock_ocn ) = ocn_cpl_dt - dtime(seq_timemgr_nclock_ice ) = ice_cpl_dt - dtime(seq_timemgr_nclock_glc ) = glc_cpl_dt - dtime(seq_timemgr_nclock_rof ) = rof_cpl_dt - dtime(seq_timemgr_nclock_wav ) = wav_cpl_dt - dtime(seq_timemgr_nclock_esp ) = esp_cpl_dt + dtime(seq_timemgr_nclock_atm ) = atm_cpl_dt + dtime(seq_timemgr_nclock_lnd ) = lnd_cpl_dt + dtime(seq_timemgr_nclock_ocn ) = ocn_cpl_dt + dtime(seq_timemgr_nclock_ice ) = ice_cpl_dt + dtime(seq_timemgr_nclock_glc ) = glc_cpl_dt + dtime(seq_timemgr_nclock_glc_avg ) = glc_cpl_avg_dt + dtime(seq_timemgr_nclock_rof ) = rof_cpl_dt + dtime(seq_timemgr_nclock_wav ) = wav_cpl_dt + dtime(seq_timemgr_nclock_esp ) = esp_cpl_dt ! --- this finds the min of dtime excluding the driver value --- dtime(seq_timemgr_nclock_drv) = maxval(dtime) @@ -729,7 +736,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi endif enddo - ! --- Initialize clocks and alarms --- + ! --- Initialize component and driver clocks and alarms common to components amd drivver clocks --- do n = 1,max_clocks call ESMF_TimeIntervalSet( TimeStep, s=dtime(n), rc=rc ) @@ -829,15 +836,16 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi ! via namelist. tcraig, 10/2007 ! -------------------------------------------------------------------- - offset(seq_timemgr_nclock_drv) = 0 - offset(seq_timemgr_nclock_atm) = atm_cpl_offset - offset(seq_timemgr_nclock_lnd) = lnd_cpl_offset - offset(seq_timemgr_nclock_ocn) = ocn_cpl_offset - offset(seq_timemgr_nclock_ice) = ice_cpl_offset - offset(seq_timemgr_nclock_glc) = glc_cpl_offset - offset(seq_timemgr_nclock_rof) = rof_cpl_offset - offset(seq_timemgr_nclock_wav) = wav_cpl_offset - offset(seq_timemgr_nclock_esp) = esp_cpl_offset + offset(seq_timemgr_nclock_drv) = 0 + offset(seq_timemgr_nclock_atm) = atm_cpl_offset + offset(seq_timemgr_nclock_lnd) = lnd_cpl_offset + offset(seq_timemgr_nclock_ocn) = ocn_cpl_offset + offset(seq_timemgr_nclock_ice) = ice_cpl_offset + offset(seq_timemgr_nclock_glc) = glc_cpl_offset + offset(seq_timemgr_nclock_glc_avg) = glc_cpl_offset + offset(seq_timemgr_nclock_rof) = rof_cpl_offset + offset(seq_timemgr_nclock_wav) = wav_cpl_offset + offset(seq_timemgr_nclock_esp) = esp_cpl_offset do n = 1,max_clocks if (abs(offset(n)) > dtime(n)) then @@ -922,6 +930,18 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi RefTime = OffsetTime, & alarmname = trim(seq_timemgr_alarm_glcrun)) + ! --- this is the glcrun_avg alarm (there ^) offset by a -dtime of the driver + call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_glc_avg), rc=rc ) + OffsetTime = CurrTime + TimeStep + call ESMF_TimeIntervalSet( TimeStep, s=-offset(seq_timemgr_nclock_drv), rc=rc ) + OffsetTime = OffsetTime + TimeStep + call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & + EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun), & + option = seq_timemgr_optNSeconds, & + opt_n = glc_cpl_avg_dt, & + RefTime = OffsetTime, & + alarmname = trim(seq_timemgr_alarm_glcrun_avg)) + call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_ocn), rc=rc ) OffsetTime = CurrTime + TimeStep call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & @@ -991,8 +1011,6 @@ subroutine seq_timemgr_EClockGetData( EClock, curr_yr, curr_mon, curr_day, & ! 
and reference date character(len=*) , intent(OUT), optional :: calendar ! calendar type -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_EClockGetData) ' type(ESMF_Time) :: CurrentTime ! Current time @@ -1124,8 +1142,6 @@ subroutine seq_timemgr_clockAdvance( SyncClock, force_stop, force_stop_ymd, forc integer, optional, intent(in) :: force_stop_ymd ! force stop ymd integer, optional, intent(in) :: force_stop_tod ! force stop tod -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_clockAdvance) ' integer :: n @@ -1241,8 +1257,6 @@ subroutine seq_timemgr_alarmInit( EClock, EAlarm, option, opt_n, opt_ymd, opt_to type(ESMF_Time) ,optional, intent(IN) :: RefTime ! ref time character(len=*) ,optional, intent(IN) :: alarmname ! alarm name -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_alarmInit): ' integer :: rc ! Return code @@ -1485,8 +1499,6 @@ subroutine seq_timemgr_alarmGet( EAlarm, next_ymd, next_tod, prev_ymd, prev_tod, integer(SHR_KIND_IN), intent(OUT), optional :: IntYrs ! alarm int yrs character(len=*) , intent(OUT), optional :: name ! alarm name -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_alarmGet) ' integer :: yy, mm, dd, sec ! Return time values @@ -1552,8 +1564,6 @@ subroutine seq_timemgr_AlarmSetOn( EClock, alarmname) type(ESMF_Clock), intent(INOUT) :: EClock ! clock/alarm character(len=*), intent(IN), optional :: alarmname ! alarmname -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_alarmSetOn) ' character(len=*), parameter :: xalarm = 'unset' @@ -1636,8 +1646,6 @@ subroutine seq_timemgr_AlarmSetOff( EClock, alarmname) type(ESMF_Clock), intent(INOUT) :: EClock ! clock/alarm character(len=*), intent(IN), optional :: alarmname ! alarmname -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_alarmSetOff) ' character(len=*), parameter :: xalarm = 'unset' @@ -1720,8 +1728,6 @@ logical function seq_timemgr_alarmIsOn( EClock, alarmname) type(ESMF_Clock), intent(IN) :: EClock ! clock/alarm character(len=*), intent(IN) :: alarmname ! which alarm -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_alarmIsOn) ' character(len=*), parameter :: xalarm = 'unset' @@ -1803,8 +1809,6 @@ logical function seq_timemgr_restartAlarmIsOn( EClock) type(ESMF_Clock) , intent(IN) :: EClock ! clock/alarm -!EOP - !----- local ----- integer :: rc character(len=*), parameter :: subname = '(seq_timemgr_restartAlarmIsOn) ' @@ -1836,8 +1840,6 @@ logical function seq_timemgr_stopAlarmIsOn( EClock) type(ESMF_Clock) , intent(IN) :: EClock ! clock/alarm -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_stopAlarmIsOn) ' @@ -1868,8 +1870,6 @@ logical function seq_timemgr_historyAlarmIsOn( EClock) type(ESMF_Clock) , intent(IN) :: EClock ! clock/alarm -!EOP - !----- local ----- integer :: rc character(len=*), parameter :: subname = '(seq_timemgr_historyAlarmIsOn) ' @@ -1906,8 +1906,6 @@ subroutine seq_timemgr_ETimeInit( ETime, ymd, tod, desc ) integer , intent(in), optional :: tod ! Time of day in seconds character(len=*), intent(in), optional :: desc ! Description of time to set -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_ETimeInit) ' integer :: yr, mon, day ! Year, month, day as integers @@ -1965,8 +1963,6 @@ subroutine seq_timemgr_ETimeGet( ETime, offset, ymd, tod ) integer, optional, intent(OUT) :: ymd ! 
date of day integer, optional, intent(OUT) :: tod ! Time of day -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_ETimeGet) ' type(ESMF_Time) :: ETimeAdd ! ESMF time + offset @@ -2035,8 +2031,6 @@ subroutine seq_timemgr_EClockInit( TimeStep, StartTime, RefTime, CurrTime, ECloc type(ESMF_Time) , intent(IN) :: CurrTime ! Current time type(ESMF_Clock) , intent(OUT) :: EClock ! Output ESMF clock -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_EClockInit) ' integer :: rc ! ESMF return code @@ -2098,8 +2092,6 @@ logical function seq_timemgr_EClockDateInSync( EClock, ymd, tod, prev) integer, intent(IN) :: tod ! Time of day (sec) logical, optional,intent(IN) :: prev ! If should get previous time -!EOP - !----- local ----- character(len=*), parameter :: subname = "(seq_timemgr_EClockDateInSync) " type(ESMF_Time) :: ETime @@ -2150,9 +2142,7 @@ subroutine seq_timemgr_clockPrint( SyncClock ) ! !INPUT/OUTPUT PARAMETERS: - type(seq_timemgr_type), intent(in) :: SyncClock ! Input clock to print - -!EOP + type(seq_timemgr_type), intent(in) :: SyncClock ! Input clock to print character(len=*), parameter :: subname = "(seq_timemgr_clockPrint) " integer(SHR_KIND_IN) :: m,n @@ -2258,8 +2248,6 @@ subroutine seq_timemgr_ESMFDebug( EClock, ETime, ETimeInterval, istring ) type(ESMF_TimeInterval), optional, intent(inout) :: ETimeInterval ! ESMF Time Interval character(len=*), optional, intent(in) :: istring -!EOP - !----- local ----- character(len=*), parameter :: subname = '(seq_timemgr_ESMFDebug) ' character(len=128) :: timestring @@ -2332,8 +2320,6 @@ subroutine seq_timemgr_ESMFCodeCheck( rc, msg ) integer, intent(in) :: rc ! return code from ESMF character(len=*),optional,intent(in) :: msg ! error message -!EOP - character(len=*),parameter :: subname = 'seq_timemgr_ESMFCodeCheck' !------------------------------------------------------------------------------- ! 
Notes: From f168784746cf7f5008c44462cbd193c879babfc7 Mon Sep 17 00:00:00 2001 From: mvertens Date: Mon, 16 Jan 2017 22:28:17 -0700 Subject: [PATCH 002/219] changes to get glc to time average inside the coupler over a year --- src/drivers/mct/cime_config/buildnml | 2 +- .../cime_config/namelist_definition_drv.xml | 45 +++++++++---------- src/drivers/mct/main/cesm_comp_mod.F90 | 8 ++-- src/drivers/mct/shr/seq_infodata_mod.F90 | 15 +++++-- src/drivers/mct/shr/seq_timemgr_mod.F90 | 18 ++++---- 5 files changed, 46 insertions(+), 42 deletions(-) diff --git a/src/drivers/mct/cime_config/buildnml b/src/drivers/mct/cime_config/buildnml index 4e29c3ff83cf..75e3270f9b10 100755 --- a/src/drivers/mct/cime_config/buildnml +++ b/src/drivers/mct/cime_config/buildnml @@ -100,7 +100,7 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): totaldt = cpl_dt * int(ncpl) if totaldt != basedt: expect(False, " %s ncpl doesn't divide base dt evenly" %comp) - nmlgen.set_value(comp.lower() + '_cpl_dt', value=cpl_dt) + nmlgen.add_default(comp.lower() + '_cpl_dt', value=cpl_dt) # elif comp.lower() is not 'cpl': #-------------------------------- diff --git a/src/drivers/mct/cime_config/namelist_definition_drv.xml b/src/drivers/mct/cime_config/namelist_definition_drv.xml index 844bcc65512f..e8a554f9af9a 100644 --- a/src/drivers/mct/cime_config/namelist_definition_drv.xml +++ b/src/drivers/mct/cime_config/namelist_definition_drv.xml @@ -1387,7 +1387,7 @@ - + integer time seq_timemgr_inparm @@ -1398,12 +1398,9 @@ NCPL_BASE_PERIOD is also set in env_run.xml and is the base period associated with NCPL coupling frequency, and has valid values: hour,day,year,decade - - -999 - - + integer time seq_timemgr_inparm @@ -1414,12 +1411,9 @@ NCPL_BASE_PERIOD is also set in env_run.xml and is the base period associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - -999 - - + integer time seq_timemgr_inparm @@ -1428,12 +1422,9 @@ currently set by default to 10800 seconds. default: 10800 - - 10800 - - + integer time seq_timemgr_inparm @@ -1444,12 +1435,9 @@ NCPL_BASE_PERIOD is also set in env_run.xml and is the base period associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - -999 - - + integer time seq_timemgr_inparm @@ -1465,7 +1453,7 @@ - + integer time seq_timemgr_inparm @@ -1476,12 +1464,22 @@ NCPL_BASE_PERIOD is also set in env_run.xml and is the base period associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - -999 - - + + integer + time + seq_timemgr_inparm + + glc coupling averging interval in seconds + set via GLC_AVG_NCPL in env_run.xml. + GLC_AVG_NCPL is averging period for glc per NCPL_BASE_PERIOD + NCPL_BASE_PERIOD is also set in env_run.xml and is the base period + associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade + + + + integer time seq_timemgr_inparm @@ -1492,9 +1490,6 @@ NCPL_BASE_PERIOD is also set in env_run.xml and is the base period associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - -999 - diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index ed85fc8b7239..8553e6ba812f 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -2950,13 +2950,13 @@ subroutine cesm_run() if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) - ! 
Set seq_infodata flag for valid data - call seq_infodata_PutData(glc_valid_input=.true.) + ! Set seq_infodata flag for valid data + call seq_infodata_PutData(infodata, glc_valid_input=.true.) endif else if (iamin_CPLID .and. glc_prognostic) then - ! Set seq_infodata flag for unvalid data - call seq_infodata_PutData(glc_valid_input=.false.) + ! Set seq_infodata flag for unvalid data + call seq_infodata_PutData(infodata, glc_valid_input=.false.) end if end if diff --git a/src/drivers/mct/shr/seq_infodata_mod.F90 b/src/drivers/mct/shr/seq_infodata_mod.F90 index 7341e4c2af2f..3ffb841874c4 100644 --- a/src/drivers/mct/shr/seq_infodata_mod.F90 +++ b/src/drivers/mct/shr/seq_infodata_mod.F90 @@ -250,6 +250,8 @@ MODULE seq_infodata_mod real(shr_kind_r8) :: max_cplstep_time ! abort if cplstep time exceeds this value !--- set from restart file --- character(SHR_KIND_CL) :: rest_case_name ! Short case identification + !--- set by driver and may be time varying + logical :: glc_valid_input ! is valid accumulated data being sent to prognostic glc end type seq_infodata_type ! --- public interfaces -------------------------------------------------------- @@ -555,6 +557,7 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid) mct_usealltoall = .false. mct_usevector = .false. max_cplstep_time = 0.0 + !--------------------------------------------------------------------------- ! Read in namelist !--------------------------------------------------------------------------- @@ -717,13 +720,13 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid) infodata%atm_aero = .false. infodata%glcrun_alarm = .false. infodata%glc_g2lupdate = .false. + infodata%glc_valid_input = .true. if (associated(infodata%pause_resume)) then deallocate(infodata%pause_resume) end if nullify(infodata%pause_resume) infodata%max_cplstep_time = max_cplstep_time - !--------------------------------------------------------------- ! check orbital mode, reset unused parameters, validate settings !--------------------------------------------------------------- @@ -906,7 +909,7 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ atm_pause, lnd_pause, ocn_pause, ice_pause, glc_pause, rof_pause, & wav_pause, cpl_pause, atm_resume, lnd_resume, ocn_resume, & ice_resume, glc_resume, rof_resume, wav_resume, cpl_resume, & - mct_usealltoall, mct_usevector, max_cplstep_time) + mct_usealltoall, mct_usevector, max_cplstep_time, glc_valid_input) implicit none @@ -1068,6 +1071,7 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ logical, optional, intent(OUT) :: glcrun_alarm ! glc run alarm logical, optional, intent(OUT) :: glc_g2lupdate ! update glc2lnd fields in lnd model real(shr_kind_r8), optional, intent(out) :: max_cplstep_time + logical, optional, intent(OUT) :: glc_valid_input logical, optional, intent(OUT) :: atm_pause ! atm write pause restart file logical, optional, intent(OUT) :: lnd_pause ! lnd write pause restart file logical, optional, intent(OUT) :: ice_pause ! 
ice write pause restart file @@ -1370,6 +1374,7 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ end if end if if ( present(max_cplstep_time) ) max_cplstep_time = infodata%max_cplstep_time + if ( present(glc_valid_input)) glc_valid_input = infodata%glc_valid_input END SUBROUTINE seq_infodata_GetData_explicit @@ -1538,7 +1543,7 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ atm_pause, lnd_pause, ocn_pause, ice_pause, glc_pause, rof_pause, & wav_pause, cpl_pause, atm_resume, lnd_resume, ocn_resume, & ice_resume, glc_resume, rof_resume, wav_resume, cpl_resume, & - mct_usealltoall, mct_usevector ) + mct_usealltoall, mct_usevector, glc_valid_input) implicit none @@ -1698,6 +1703,7 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ logical, optional, intent(IN) :: atm_aero ! atm aerosols logical, optional, intent(IN) :: glcrun_alarm ! glc run alarm logical, optional, intent(IN) :: glc_g2lupdate ! update glc2lnd fields in lnd model + logical, optional, intent(IN) :: glc_valid_input logical, optional, intent(IN) :: atm_pause ! atm pause logical, optional, intent(IN) :: lnd_pause ! lnd pause logical, optional, intent(IN) :: ice_pause ! ice pause @@ -1875,6 +1881,7 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ if ( present(atm_aero) ) infodata%atm_aero = atm_aero if ( present(glcrun_alarm) ) infodata%glcrun_alarm = glcrun_alarm if ( present(glc_g2lupdate) ) infodata%glc_g2lupdate = glc_g2lupdate + if ( present(glc_valid_input) ) infodata%glc_valid_input = glc_valid_input if ( present(atm_pause) ) then if (associated(infodata%pause_resume)) then infodata%pause_resume%atm_pause = atm_pause @@ -2293,6 +2300,7 @@ subroutine seq_infodata_bcast(infodata,mpicom) call shr_mpi_bcast(infodata%atm_aero, mpicom) call shr_mpi_bcast(infodata%glcrun_alarm, mpicom) call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom) + call shr_mpi_bcast(infodata%glc_valid_input, mpicom) if (associated(infodata%pause_resume)) then call shr_mpi_bcast(infodata%pause_resume%atm_pause, mpicom) call shr_mpi_bcast(infodata%pause_resume%lnd_pause, mpicom) @@ -2624,6 +2632,7 @@ subroutine seq_infodata_Exchange(infodata,ID,type) call shr_mpi_bcast(infodata%precip_fact, mpicom,pebcast=pebcast) call shr_mpi_bcast(infodata%glcrun_alarm, mpicom,pebcast=pebcast) call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%glc_valid_input, mpicom,pebcast=pebcast) endif end subroutine seq_infodata_Exchange diff --git a/src/drivers/mct/shr/seq_timemgr_mod.F90 b/src/drivers/mct/shr/seq_timemgr_mod.F90 index 574dd1475acf..6a63cdc8d684 100644 --- a/src/drivers/mct/shr/seq_timemgr_mod.F90 +++ b/src/drivers/mct/shr/seq_timemgr_mod.F90 @@ -322,6 +322,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi integer(SHR_KIND_IN) :: ice_cpl_dt ! Sea-Ice coupling interval integer(SHR_KIND_IN) :: ocn_cpl_dt ! Ocean coupling interval integer(SHR_KIND_IN) :: glc_cpl_dt ! Glc coupling interval + integer(SHR_KIND_IN) :: glc_avg_cpl_dt ! Glc avering coupling interval integer(SHR_KIND_IN) :: rof_cpl_dt ! Runoff coupling interval integer(SHR_KIND_IN) :: wav_cpl_dt ! Wav coupling interval integer(SHR_KIND_IN) :: esp_cpl_dt ! 
Esp coupling interval @@ -352,7 +353,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi start_ymd, start_tod, ref_ymd, ref_tod, & atm_cpl_dt, ocn_cpl_dt, ice_cpl_dt, lnd_cpl_dt, & atm_cpl_offset, lnd_cpl_offset, ocn_cpl_offset, & - ice_cpl_offset, glc_cpl_dt, glc_cpl_offset, & + ice_cpl_offset, glc_cpl_dt, glc_cpl_offset, glc_avg_cpl_dt, & wav_cpl_dt, wav_cpl_offset, esp_cpl_dt, esp_cpl_offset, & rof_cpl_dt, rof_cpl_offset, end_restart !------------------------------------------------------------------------------- @@ -414,6 +415,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi ice_cpl_dt = 0 ocn_cpl_dt = 0 glc_cpl_dt = 0 + glc_avg_cpl_dt = 0 rof_cpl_dt = 0 wav_cpl_dt = 0 esp_cpl_dt = 0 @@ -480,9 +482,9 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi if (wav_cpl_dt == 0) wav_cpl_dt = atm_cpl_dt ! Copy atm coupling time into wav if (esp_cpl_dt == 0) esp_cpl_dt = atm_cpl_dt ! Copy atm coupling time into esp - if (glc_cpl_avg_dt == 0) then + if (glc_avg_cpl_dt == 0) then ! set default average coupling interval - glc_cpl_avg_dt = glc_cpl_dt + glc_avg_cpl_dt = glc_cpl_dt end if if ( ref_ymd == 0 ) then @@ -559,7 +561,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi write(logunit,F0I) trim(subname),' ice_cpl_dt = ',ice_cpl_dt write(logunit,F0I) trim(subname),' ocn_cpl_dt = ',ocn_cpl_dt write(logunit,F0I) trim(subname),' glc_cpl_dt = ',glc_cpl_dt - write(logunit,F0I) trim(subname),' glc_cpl_avg_dt = ',glc_cpl_avg_dt + write(logunit,F0I) trim(subname),' glc_avg_cpl_dt = ',glc_avg_cpl_dt write(logunit,F0I) trim(subname),' rof_cpl_dt = ',rof_cpl_dt write(logunit,F0I) trim(subname),' wav_cpl_dt = ',wav_cpl_dt write(logunit,F0I) trim(subname),' esp_cpl_dt = ',esp_cpl_dt @@ -649,7 +651,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi call shr_mpi_bcast( ice_cpl_dt, mpicom ) call shr_mpi_bcast( ocn_cpl_dt, mpicom ) call shr_mpi_bcast( glc_cpl_dt, mpicom ) - call shr_mpi_bcast( glc_cpl_avg_dt, mpicom ) + call shr_mpi_bcast( glc_avg_cpl_dt, mpicom ) call shr_mpi_bcast( rof_cpl_dt, mpicom ) call shr_mpi_bcast( wav_cpl_dt, mpicom ) call shr_mpi_bcast( esp_cpl_dt, mpicom ) @@ -720,7 +722,6 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi dtime(seq_timemgr_nclock_ocn ) = ocn_cpl_dt dtime(seq_timemgr_nclock_ice ) = ice_cpl_dt dtime(seq_timemgr_nclock_glc ) = glc_cpl_dt - dtime(seq_timemgr_nclock_glc_avg ) = glc_cpl_avg_dt dtime(seq_timemgr_nclock_rof ) = rof_cpl_dt dtime(seq_timemgr_nclock_wav ) = wav_cpl_dt dtime(seq_timemgr_nclock_esp ) = esp_cpl_dt @@ -842,7 +843,6 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi offset(seq_timemgr_nclock_ocn) = ocn_cpl_offset offset(seq_timemgr_nclock_ice) = ice_cpl_offset offset(seq_timemgr_nclock_glc) = glc_cpl_offset - offset(seq_timemgr_nclock_glc_avg) = glc_cpl_offset offset(seq_timemgr_nclock_rof) = rof_cpl_offset offset(seq_timemgr_nclock_wav) = wav_cpl_offset offset(seq_timemgr_nclock_esp) = esp_cpl_offset @@ -931,14 +931,14 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi alarmname = trim(seq_timemgr_alarm_glcrun)) ! 
--- this is the glcrun_avg alarm (there ^) offset by a -dtime of the driver - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_glc_avg), rc=rc ) + call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_glc), rc=rc ) OffsetTime = CurrTime + TimeStep call ESMF_TimeIntervalSet( TimeStep, s=-offset(seq_timemgr_nclock_drv), rc=rc ) OffsetTime = OffsetTime + TimeStep call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun), & option = seq_timemgr_optNSeconds, & - opt_n = glc_cpl_avg_dt, & + opt_n = glc_avg_cpl_dt, & RefTime = OffsetTime, & alarmname = trim(seq_timemgr_alarm_glcrun_avg)) From 77327f8925adccc069ed38350f60f51dfb888242 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Tue, 24 Jan 2017 20:03:50 -0700 Subject: [PATCH 003/219] changes to have new yearly averaging for cism --- .../mct/cime_config/config_component_acme.xml | 10 ++-- .../mct/cime_config/config_component_cesm.xml | 10 ++-- .../cime_config/namelist_definition_drv.xml | 17 +++---- src/drivers/mct/shr/seq_timemgr_mod.F90 | 46 +++++++++---------- 4 files changed, 38 insertions(+), 45 deletions(-) diff --git a/src/drivers/mct/cime_config/config_component_acme.xml b/src/drivers/mct/cime_config/config_component_acme.xml index 9885951e3841..df92064541c5 100644 --- a/src/drivers/mct/cime_config/config_component_acme.xml +++ b/src/drivers/mct/cime_config/config_component_acme.xml @@ -360,12 +360,10 @@ Number of glc coupling intervals per NCPL_BASE_PERIOD. - - integer - 1 - - 1 - + + char + yearly,daily + daily run_coupling env_run.xml Number of glc coupling intervals relative to NCPL_BASE_PERIOD for averaging of GLC input. diff --git a/src/drivers/mct/cime_config/config_component_cesm.xml b/src/drivers/mct/cime_config/config_component_cesm.xml index 2b40a2a27b3f..41e9a1b88512 100644 --- a/src/drivers/mct/cime_config/config_component_cesm.xml +++ b/src/drivers/mct/cime_config/config_component_cesm.xml @@ -200,12 +200,10 @@ Number of glc coupling intervals per NCPL_BASE_PERIOD. - - integer - 1 - - 1 - + + char + yearly,daily + yearly run_coupling env_run.xml Number of glc coupling intervals relative to NCPL_BASE_PERIOD for averaging of GLC input. diff --git a/src/drivers/mct/cime_config/namelist_definition_drv.xml b/src/drivers/mct/cime_config/namelist_definition_drv.xml index e8a554f9af9a..72ee860e6180 100644 --- a/src/drivers/mct/cime_config/namelist_definition_drv.xml +++ b/src/drivers/mct/cime_config/namelist_definition_drv.xml @@ -1448,9 +1448,6 @@ NCPL_BASE_PERIOD is also set in env_run.xml and is the base period associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - -999 - @@ -1466,16 +1463,16 @@ - - integer + + char time seq_timemgr_inparm + yearly,daily + + $GLC_AVG_PERIOD + - glc coupling averging interval in seconds - set via GLC_AVG_NCPL in env_run.xml. 
- GLC_AVG_NCPL is averging period for glc per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is also set in env_run.xml and is the base period - associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade + glc coupling averging period diff --git a/src/drivers/mct/shr/seq_timemgr_mod.F90 b/src/drivers/mct/shr/seq_timemgr_mod.F90 index 6a63cdc8d684..7a0735ca1df5 100644 --- a/src/drivers/mct/shr/seq_timemgr_mod.F90 +++ b/src/drivers/mct/shr/seq_timemgr_mod.F90 @@ -132,11 +132,12 @@ module seq_timemgr_mod seq_timemgr_optNMonth = "nmonth" , & seq_timemgr_optNYears = "nyears" , & seq_timemgr_optNYear = "nyear" , & + seq_timemgr_optDaily = "daily" , & seq_timemgr_optMonthly = "monthly" , & seq_timemgr_optYearly = "yearly" , & seq_timemgr_optDate = "date" , & seq_timemgr_optIfdays0 = "ifdays0" , & - seq_timemgr_optEnd = "end" + seq_timemgr_optEnd = "end" integer(SHR_KIND_IN),private,parameter :: & seq_timemgr_nclock_drv = 1, & @@ -244,7 +245,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi Eclock_rof, EClock_wav, Eclock_esp) ! !USES: - use pio, only : file_desc_T + use pio, only : file_desc_T use shr_string_mod, only : shr_string_toupper use shr_file_mod, only : shr_file_getunit, shr_file_freeunit use shr_mpi_mod, only : shr_mpi_bcast @@ -322,7 +323,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi integer(SHR_KIND_IN) :: ice_cpl_dt ! Sea-Ice coupling interval integer(SHR_KIND_IN) :: ocn_cpl_dt ! Ocean coupling interval integer(SHR_KIND_IN) :: glc_cpl_dt ! Glc coupling interval - integer(SHR_KIND_IN) :: glc_avg_cpl_dt ! Glc avering coupling interval + character(SHR_KIND_CS) :: glc_avg_period ! Glc avering coupling period integer(SHR_KIND_IN) :: rof_cpl_dt ! Runoff coupling interval integer(SHR_KIND_IN) :: wav_cpl_dt ! Wav coupling interval integer(SHR_KIND_IN) :: esp_cpl_dt ! Esp coupling interval @@ -353,7 +354,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi start_ymd, start_tod, ref_ymd, ref_tod, & atm_cpl_dt, ocn_cpl_dt, ice_cpl_dt, lnd_cpl_dt, & atm_cpl_offset, lnd_cpl_offset, ocn_cpl_offset, & - ice_cpl_offset, glc_cpl_dt, glc_cpl_offset, glc_avg_cpl_dt, & + ice_cpl_offset, glc_cpl_dt, glc_cpl_offset, glc_avg_period, & wav_cpl_dt, wav_cpl_offset, esp_cpl_dt, esp_cpl_offset, & rof_cpl_dt, rof_cpl_offset, end_restart !------------------------------------------------------------------------------- @@ -415,7 +416,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi ice_cpl_dt = 0 ocn_cpl_dt = 0 glc_cpl_dt = 0 - glc_avg_cpl_dt = 0 + glc_avg_period = seq_timemgr_optDaily rof_cpl_dt = 0 wav_cpl_dt = 0 esp_cpl_dt = 0 @@ -482,11 +483,6 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi if (wav_cpl_dt == 0) wav_cpl_dt = atm_cpl_dt ! Copy atm coupling time into wav if (esp_cpl_dt == 0) esp_cpl_dt = atm_cpl_dt ! Copy atm coupling time into esp - if (glc_avg_cpl_dt == 0) then - ! 
set default average coupling interval - glc_avg_cpl_dt = glc_cpl_dt - end if - if ( ref_ymd == 0 ) then ref_ymd = start_ymd ref_tod = start_tod @@ -561,7 +557,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi write(logunit,F0I) trim(subname),' ice_cpl_dt = ',ice_cpl_dt write(logunit,F0I) trim(subname),' ocn_cpl_dt = ',ocn_cpl_dt write(logunit,F0I) trim(subname),' glc_cpl_dt = ',glc_cpl_dt - write(logunit,F0I) trim(subname),' glc_avg_cpl_dt = ',glc_avg_cpl_dt + write(logunit,F0I) trim(subname),' glc_avg_period = ',glc_avg_period write(logunit,F0I) trim(subname),' rof_cpl_dt = ',rof_cpl_dt write(logunit,F0I) trim(subname),' wav_cpl_dt = ',wav_cpl_dt write(logunit,F0I) trim(subname),' esp_cpl_dt = ',esp_cpl_dt @@ -651,7 +647,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi call shr_mpi_bcast( ice_cpl_dt, mpicom ) call shr_mpi_bcast( ocn_cpl_dt, mpicom ) call shr_mpi_bcast( glc_cpl_dt, mpicom ) - call shr_mpi_bcast( glc_avg_cpl_dt, mpicom ) + call shr_mpi_bcast( glc_avg_period, mpicom ) call shr_mpi_bcast( rof_cpl_dt, mpicom ) call shr_mpi_bcast( wav_cpl_dt, mpicom ) call shr_mpi_bcast( esp_cpl_dt, mpicom ) @@ -929,18 +925,22 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi opt_n = dtime(seq_timemgr_nclock_glc), & RefTime = OffsetTime, & alarmname = trim(seq_timemgr_alarm_glcrun)) - - ! --- this is the glcrun_avg alarm (there ^) offset by a -dtime of the driver - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_glc), rc=rc ) - OffsetTime = CurrTime + TimeStep - call ESMF_TimeIntervalSet( TimeStep, s=-offset(seq_timemgr_nclock_drv), rc=rc ) - OffsetTime = OffsetTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun), & - option = seq_timemgr_optNSeconds, & - opt_n = glc_avg_cpl_dt, & - RefTime = OffsetTime, & + if (glc_avg_period == seq_timemgr_optDaily) then + call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & + EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun_avg), & + option = seq_timemgr_optNSeconds, & + opt_n = 86400, & + RefTime = OffsetTime, & alarmname = trim(seq_timemgr_alarm_glcrun_avg)) + else if (glc_avg_period == seq_timemgr_optYearly) then + call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & + EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun_avg), & + option = seq_timemgr_optYearly, & + RefTime = OffsetTime, & + alarmname = trim(seq_timemgr_alarm_glcrun_avg)) + else + call shr_sys_abort(subname//':: glc_avg_period can only be yearly or daily') + end if call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_ocn), rc=rc ) OffsetTime = CurrTime + TimeStep From 12b11ff5b49bca723b5ef7133bbbf28955dd2298 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Thu, 26 Jan 2017 14:44:33 -0700 Subject: [PATCH 004/219] bug fix for compilation --- src/drivers/mct/shr/seq_timemgr_mod.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/drivers/mct/shr/seq_timemgr_mod.F90 b/src/drivers/mct/shr/seq_timemgr_mod.F90 index 7a0735ca1df5..92e6783dda8e 100644 --- a/src/drivers/mct/shr/seq_timemgr_mod.F90 +++ b/src/drivers/mct/shr/seq_timemgr_mod.F90 @@ -557,7 +557,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi write(logunit,F0I) trim(subname),' ice_cpl_dt = ',ice_cpl_dt write(logunit,F0I) 
trim(subname),' ocn_cpl_dt = ',ocn_cpl_dt write(logunit,F0I) trim(subname),' glc_cpl_dt = ',glc_cpl_dt - write(logunit,F0I) trim(subname),' glc_avg_period = ',glc_avg_period + write(logunit,F0A) trim(subname),' glc_avg_period = ',glc_avg_period write(logunit,F0I) trim(subname),' rof_cpl_dt = ',rof_cpl_dt write(logunit,F0I) trim(subname),' wav_cpl_dt = ',wav_cpl_dt write(logunit,F0I) trim(subname),' esp_cpl_dt = ',esp_cpl_dt From 8878fd7fe8328b901fa8a964fdaca06650a6d7b9 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 5 Apr 2017 11:25:22 -0600 Subject: [PATCH 005/219] Minor cleanup Mostly changes comments and documentation Also removes a trailing comma from a list in python --- src/drivers/mct/cime_config/config_component_acme.xml | 8 +++++++- src/drivers/mct/cime_config/config_component_cesm.xml | 8 +++++++- src/drivers/mct/cime_config/namelist_definition_drv.xml | 7 ++++++- src/drivers/mct/main/cesm_comp_mod.F90 | 6 ++++-- src/drivers/mct/shr/seq_timemgr_mod.F90 | 2 +- 5 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/drivers/mct/cime_config/config_component_acme.xml b/src/drivers/mct/cime_config/config_component_acme.xml index df92064541c5..4f0d2ddb1f13 100644 --- a/src/drivers/mct/cime_config/config_component_acme.xml +++ b/src/drivers/mct/cime_config/config_component_acme.xml @@ -366,7 +366,13 @@ daily run_coupling env_run.xml - Number of glc coupling intervals relative to NCPL_BASE_PERIOD for averaging of GLC input. + Period at which coupler averages fields sent to GLC. + This supports doing the averaging to GLC less frequently than GLC is called + (i.e., separating the averaging frequency from the calling frequency). + This is useful because there are benefits to only averaging the GLC inputs + as frequently as they are really needed (yearly for CISM), but GLC needs to + still be called more frequently than that in order to support mid-year restarts. + diff --git a/src/drivers/mct/cime_config/config_component_cesm.xml b/src/drivers/mct/cime_config/config_component_cesm.xml index 41e9a1b88512..3bad9d17e6b5 100644 --- a/src/drivers/mct/cime_config/config_component_cesm.xml +++ b/src/drivers/mct/cime_config/config_component_cesm.xml @@ -206,7 +206,13 @@ yearly run_coupling env_run.xml - Number of glc coupling intervals relative to NCPL_BASE_PERIOD for averaging of GLC input. + Period at which coupler averages fields sent to GLC. + This supports doing the averaging to GLC less frequently than GLC is called + (i.e., separating the averaging frequency from the calling frequency). + This is useful because there are benefits to only averaging the GLC inputs + as frequently as they are really needed (yearly for CISM), but GLC needs to + still be called more frequently than that in order to support mid-year restarts. + diff --git a/src/drivers/mct/cime_config/namelist_definition_drv.xml b/src/drivers/mct/cime_config/namelist_definition_drv.xml index 72ee860e6180..7b26ef9768a6 100644 --- a/src/drivers/mct/cime_config/namelist_definition_drv.xml +++ b/src/drivers/mct/cime_config/namelist_definition_drv.xml @@ -1472,7 +1472,12 @@ $GLC_AVG_PERIOD - glc coupling averging period + Period at which coupler averages fields sent to GLC. + This supports doing the averaging to GLC less frequently than GLC is called + (i.e., separating the averaging frequency from the calling frequency). 
+ This is useful because there are benefits to only averaging the GLC inputs + as frequently as they are really needed (yearly for CISM), but GLC needs to + still be called more frequently than that in order to support mid-year restarts. diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index 8553e6ba812f..d6c572843c92 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -2206,7 +2206,9 @@ subroutine cesm_run() if (tod == 0) t24hr_alarm = .true. if (month==1 .and. day==1 .and. tod==0) t1yr_alarm = .true. - call seq_infodata_putData(infodata, glcrun_alarm=glcrun_alarm) !??? TODO - why is this here ??? + ! TODO(wjs, 2017-04-05) I think glcrun_alarm can be removed from infodata: It used + ! to be needed by CLM, but no longer is needed. + call seq_infodata_putData(infodata, glcrun_alarm=glcrun_alarm) if (seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_datestop)) then if (iamroot_CPLID) then @@ -2955,7 +2957,7 @@ subroutine cesm_run() endif else if (iamin_CPLID .and. glc_prognostic) then - ! Set seq_infodata flag for unvalid data + ! Set seq_infodata flag for invalid data call seq_infodata_PutData(infodata, glc_valid_input=.false.) end if end if diff --git a/src/drivers/mct/shr/seq_timemgr_mod.F90 b/src/drivers/mct/shr/seq_timemgr_mod.F90 index 92e6783dda8e..f0f5d6daf165 100644 --- a/src/drivers/mct/shr/seq_timemgr_mod.F90 +++ b/src/drivers/mct/shr/seq_timemgr_mod.F90 @@ -733,7 +733,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi endif enddo - ! --- Initialize component and driver clocks and alarms common to components amd drivver clocks --- + ! --- Initialize component and driver clocks and alarms common to components and driver clocks --- do n = 1,max_clocks call ESMF_TimeIntervalSet( TimeStep, s=dtime(n), rc=rc ) From 40059b8345600b202cfa511394dc900bae318ae9 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 5 Apr 2017 12:14:18 -0600 Subject: [PATCH 006/219] Call seq_infodata_putData for glc_valid_input from all tasks I'm not sure this is necessary, but it seems safest --- src/drivers/mct/main/cesm_comp_mod.F90 | 49 +++++++++++++------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index d6c572843c92..4faf16af8a8c 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -2932,32 +2932,33 @@ subroutine cesm_run() !| glc prep-merge !---------------------------------------------------- - if (iamin_CPLID .and. glc_prognostic) then - call cesm_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') - call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (lnd_c2_glc) then - call prep_glc_accum_avg(timer='CPL:glcprep_avg') - - ! Note that l2x_gx is obtained from mapping the module variable l2gacc_lx - call prep_glc_calc_l2x_gx(fractions_lx, timer='CPL:glcprep_lnd2glc') - - call prep_glc_mrg(infodata, fractions_gx, timer_mrg='CPL:glcprep_mrgx2g') - - call component_diag(infodata, glc, flow='x2c', comment='send glc', & - info_debug=info_debug, timer_diag='CPL:glcprep_diagav') - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) 
+ if (glc_prognostic) then + if (iamin_CPLID) then + call cesm_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') + call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) + if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) + + if (lnd_c2_glc) then + call prep_glc_accum_avg(timer='CPL:glcprep_avg') + + ! Note that l2x_gx is obtained from mapping the module variable l2gacc_lx + call prep_glc_calc_l2x_gx(fractions_lx, timer='CPL:glcprep_lnd2glc') + + call prep_glc_mrg(infodata, fractions_gx, timer_mrg='CPL:glcprep_mrgx2g') + + call component_diag(infodata, glc, flow='x2c', comment='send glc', & + info_debug=info_debug, timer_diag='CPL:glcprep_diagav') + endif + + if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) + call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) + + end if ! iamin_CPLID - ! Set seq_infodata flag for valid data call seq_infodata_PutData(infodata, glc_valid_input=.true.) - endif - else - if (iamin_CPLID .and. glc_prognostic) then - ! Set seq_infodata flag for invalid data + end if ! glc_prognostic + else ! .not. glcrun_avg_alarm + if (glc_prognostic) then call seq_infodata_PutData(infodata, glc_valid_input=.false.) end if end if From 076847be8194471461721717aab505f5d335cf5b Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 5 Apr 2017 12:34:26 -0600 Subject: [PATCH 007/219] Do GLC RECV-POST based on glcrun_alarm Mariana had changed this to use glcrun_avg_alarm. But I think this work should really be done every time glc is run, even if it's not an averaging time. --- src/drivers/mct/main/cesm_comp_mod.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index 4faf16af8a8c..12f403d76016 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -3431,7 +3431,7 @@ subroutine cesm_run() !| GLC RECV-POST !---------------------------------------------------------- - if (glc_present .and. glcrun_avg_alarm) then + if (glc_present .and. glcrun_alarm) then !---------------------------------------------------------- !| glc -> cpl From 27ddee4f19f45bb4ecc393d0d179009ed11bfda3 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 5 Apr 2017 20:23:11 -0600 Subject: [PATCH 008/219] Support GLC_AVG_PERIOD = 'glc_coupling_period' rather than 'daily' If specifying GLC_AVG_PERIOD = 'glc_coupling_period', we'll automatically set the averaging clock to match the glc run clock. This provides more robustness than the earlier hard-coded 'daily' option, in case the glc coupling period is not actually daily. --- .../mct/cime_config/config_component_acme.xml | 7 +++++-- .../mct/cime_config/config_component_cesm.xml | 5 ++++- .../mct/cime_config/namelist_definition_drv.xml | 5 ++++- src/drivers/mct/shr/seq_timemgr_mod.F90 | 15 ++++++++------- 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/drivers/mct/cime_config/config_component_acme.xml b/src/drivers/mct/cime_config/config_component_acme.xml index 4f0d2ddb1f13..a102d40624d1 100644 --- a/src/drivers/mct/cime_config/config_component_acme.xml +++ b/src/drivers/mct/cime_config/config_component_acme.xml @@ -362,8 +362,8 @@ char - yearly,daily - daily + glc_coupling_period,yearly + glc_coupling_period run_coupling env_run.xml Period at which coupler averages fields sent to GLC. 
@@ -372,6 +372,9 @@ This is useful because there are benefits to only averaging the GLC inputs as frequently as they are really needed (yearly for CISM), but GLC needs to still be called more frequently than that in order to support mid-year restarts. + + Setting GLC_AVG_PERIOD to 'glc_coupling_period' means that the averaging is + done exactly when the GLC is called (governed by GLC_NCPL). diff --git a/src/drivers/mct/cime_config/config_component_cesm.xml b/src/drivers/mct/cime_config/config_component_cesm.xml index 3bad9d17e6b5..cf424aa34646 100644 --- a/src/drivers/mct/cime_config/config_component_cesm.xml +++ b/src/drivers/mct/cime_config/config_component_cesm.xml @@ -202,7 +202,7 @@ char - yearly,daily + glc_coupling_period,yearly yearly run_coupling env_run.xml @@ -212,6 +212,9 @@ This is useful because there are benefits to only averaging the GLC inputs as frequently as they are really needed (yearly for CISM), but GLC needs to still be called more frequently than that in order to support mid-year restarts. + + Setting GLC_AVG_PERIOD to 'glc_coupling_period' means that the averaging is + done exactly when the GLC is called (governed by GLC_NCPL). diff --git a/src/drivers/mct/cime_config/namelist_definition_drv.xml b/src/drivers/mct/cime_config/namelist_definition_drv.xml index 7b26ef9768a6..98d0b8283c24 100644 --- a/src/drivers/mct/cime_config/namelist_definition_drv.xml +++ b/src/drivers/mct/cime_config/namelist_definition_drv.xml @@ -1467,7 +1467,7 @@ char time seq_timemgr_inparm - yearly,daily + glc_coupling_period,yearly $GLC_AVG_PERIOD @@ -1478,6 +1478,9 @@ This is useful because there are benefits to only averaging the GLC inputs as frequently as they are really needed (yearly for CISM), but GLC needs to still be called more frequently than that in order to support mid-year restarts. + + Setting glc_avg_period to 'glc_coupling_period' means that the averaging is + done exactly when the GLC is called (governed by GLC_NCPL). diff --git a/src/drivers/mct/shr/seq_timemgr_mod.F90 b/src/drivers/mct/shr/seq_timemgr_mod.F90 index f0f5d6daf165..dd525998ae22 100644 --- a/src/drivers/mct/shr/seq_timemgr_mod.F90 +++ b/src/drivers/mct/shr/seq_timemgr_mod.F90 @@ -132,12 +132,12 @@ module seq_timemgr_mod seq_timemgr_optNMonth = "nmonth" , & seq_timemgr_optNYears = "nyears" , & seq_timemgr_optNYear = "nyear" , & - seq_timemgr_optDaily = "daily" , & seq_timemgr_optMonthly = "monthly" , & seq_timemgr_optYearly = "yearly" , & seq_timemgr_optDate = "date" , & seq_timemgr_optIfdays0 = "ifdays0" , & - seq_timemgr_optEnd = "end" + seq_timemgr_optEnd = "end" , & + seq_timemgr_optGLCCouplingPeriod = "glc_coupling_period" integer(SHR_KIND_IN),private,parameter :: & seq_timemgr_nclock_drv = 1, & @@ -416,7 +416,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi ice_cpl_dt = 0 ocn_cpl_dt = 0 glc_cpl_dt = 0 - glc_avg_period = seq_timemgr_optDaily + glc_avg_period = seq_timemgr_optGLCCouplingPeriod rof_cpl_dt = 0 wav_cpl_dt = 0 esp_cpl_dt = 0 @@ -925,13 +925,14 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi opt_n = dtime(seq_timemgr_nclock_glc), & RefTime = OffsetTime, & alarmname = trim(seq_timemgr_alarm_glcrun)) - if (glc_avg_period == seq_timemgr_optDaily) then + if (glc_avg_period == seq_timemgr_optGLCCouplingPeriod) then + ! 
Create this alarm identically to the glcrun alarm (which is created above) call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun_avg), & option = seq_timemgr_optNSeconds, & - opt_n = 86400, & + opt_n = dtime(seq_timemgr_nclock_glc), & RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_glcrun_avg)) + alarmname = trim(seq_timemgr_alarm_glcrun_avg)) else if (glc_avg_period == seq_timemgr_optYearly) then call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun_avg), & @@ -939,7 +940,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi RefTime = OffsetTime, & alarmname = trim(seq_timemgr_alarm_glcrun_avg)) else - call shr_sys_abort(subname//':: glc_avg_period can only be yearly or daily') + call shr_sys_abort(subname//':: glc_avg_period can only be glc_coupling_period or yearly') end if call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_ocn), rc=rc ) From 97068a59182e8ace9943f2e452b4a009d95bfb89 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 5 Apr 2017 20:40:55 -0600 Subject: [PATCH 009/219] Add an error check Abort if it's ever the case that glcrun_avg_alarm is true, but glcrun_alarm is false --- src/drivers/mct/main/cesm_comp_mod.F90 | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index 12f403d76016..0bd192a873bf 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -2928,6 +2928,16 @@ subroutine cesm_run() ! NOTE - only create appropriate input to glc if the avg_alarm is on if (glc_present) then if (glcrun_avg_alarm) then + + if (.not. glcrun_alarm) then + write(logunit,*) 'ERROR: glcrun_avg_alarm is true, but glcrun_alarm is false' + write(logunit,*) 'Make sure that NCPL_BASE_PERIOD, GLC_NCPL and GLC_AVG_PERIOD' + write(logunit,*) 'are set so that glc averaging only happens at glc coupling times.' + write(logunit,*) '(It is allowable for glc coupling to be more frequent than glc averaging,' + write(logunit,*) 'but not for glc averaging to be more frequent than glc coupling.)' + call shr_sys_abort(subname//' glcrun_avg_alarm is true, but glcrun_alarm is false') + end if + !---------------------------------------------------- !| glc prep-merge !---------------------------------------------------- From 2a9e5ffc9d8661ba7bb73c3210746c337289acd2 Mon Sep 17 00:00:00 2001 From: jayeshkrishna Date: Fri, 7 Apr 2017 11:14:36 -0500 Subject: [PATCH 010/219] Adding pio_set_rearr_opts to pio1 Adding the pio_set_rearr_opts() function, that sets the rearranger options for an iosys, to pio1. This interface is already present in pio2. This interface would allow setting of rearranger options without exposing the pio_rearr_opt_t type to the user. 
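As a usage illustration (not taken from this change set), a caller might select the
point-to-point rearranger with two-directional flow control roughly as sketched
below.  The option values are arbitrary examples, and the imports assume that
iosystem_desc_t and the PIO_REARR_* constants are reachable through pio_types,
as they are inside the new function itself:

    subroutine example_set_rearr_opts(iosys)
      ! Illustrative sketch only: pick the p2p rearranger and enable flow
      ! control in both directions.  The particular settings are examples,
      ! not defaults recommended by this patch.
      use pio_types,  only : iosystem_desc_t, pio_noerr,                      &
                             PIO_REARR_COMM_P2P, PIO_REARR_COMM_FC_2D_ENABLE, &
                             PIO_REARR_COMM_UNLIMITED_PEND_REQ
      use piolib_mod, only : PIO_set_rearr_opts
      type(iosystem_desc_t), intent(inout) :: iosys  ! already set up via pio_init
      integer :: ierr

      ierr = PIO_set_rearr_opts(iosys, PIO_REARR_COMM_P2P,                       &
                                PIO_REARR_COMM_FC_2D_ENABLE,                     &
                                enable_hs_c2i=.true.,  enable_isend_c2i=.false., &
                                max_pend_req_c2i=64,                             &
                                enable_hs_i2c=.false., enable_isend_i2c=.true.,  &
                                max_pend_req_i2c=PIO_REARR_COMM_UNLIMITED_PEND_REQ)
      if (ierr /= pio_noerr) then
         ! PIO_set_rearr_opts aborts internally on invalid input, so a
         ! nonzero return is not expected here in practice.
      end if
    end subroutine example_set_rearr_opts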
--- pio/piolib_mod.F90 | 73 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/pio/piolib_mod.F90 b/pio/piolib_mod.F90 index c56e84fa45d7..5d9a44aa576a 100644 --- a/pio/piolib_mod.F90 +++ b/pio/piolib_mod.F90 @@ -58,6 +58,7 @@ module piolib_mod public :: PIO_init, & PIO_finalize, & PIO_initdecomp, & + PIO_set_rearr_opts,& PIO_openfile, & PIO_syncfile, & PIO_createfile, & @@ -1622,6 +1623,78 @@ subroutine init_iosystem_rearr_options(iosystem) end subroutine init_iosystem_rearr_options + function PIO_set_rearr_opts(iosystem, comm_type, fcd,& + enable_hs_c2i, enable_isend_c2i,& + max_pend_req_c2i,& + enable_hs_i2c, enable_isend_i2c,& + max_pend_req_i2c) result(ierr) + + use pio_types + + type (iosystem_desc_t), intent(inout) :: iosystem + integer, intent(in) :: comm_type, fcd + logical, intent(in) :: enable_hs_c2i, enable_hs_i2c + logical, intent(in) :: enable_isend_c2i, enable_isend_i2c + integer, intent(in) :: max_pend_req_c2i, max_pend_req_i2c + + integer :: ierr + + ierr = PIO_NOERR + + if(max_pend_req_c2i < 0) then + if(max_pend_req_c2i /= PIO_REARR_COMM_UNLIMITED_PEND_REQ) then + call piodie(__PIO_FILE__,__LINE__,& + "Invalid max pend req (comp to io) specified") + end if + end if + if(max_pend_req_i2c < 0) then + if(max_pend_req_i2c /= PIO_REARR_COMM_UNLIMITED_PEND_REQ) then + call piodie(__PIO_FILE__,__LINE__,& + "Invalid max pend req (io to comp) specified") + end if + end if + + iosystem%rearr_opts%comm_type = comm_type + + ! Reset to defaults + iosystem%rearr_opts%comm_fc_opts_comp2io%enable_hs = .false. + iosystem%rearr_opts%comm_fc_opts_comp2io%enable_isend = .false. + iosystem%rearr_opts%comm_fc_opts_comp2io%max_pend_req = DEF_P2P_MAXREQ + + iosystem%rearr_opts%comm_fc_opts_io2comp%enable_hs = .false. + iosystem%rearr_opts%comm_fc_opts_io2comp%enable_isend = .false. + iosystem%rearr_opts%comm_fc_opts_io2comp%max_pend_req = DEF_P2P_MAXREQ + if(iosystem%rearr_opts%comm_type == PIO_REARR_COMM_COLL) then + ! Init/Reset rest of the structure to valid values + iosystem%rearr_opts%fcd = PIO_REARR_COMM_FC_2D_DISABLE + else if(iosystem%rearr_opts%comm_type == PIO_REARR_COMM_P2P) then + iosystem%rearr_opts%fcd = fcd + if(iosystem%rearr_opts%fcd == PIO_REARR_COMM_FC_2D_DISABLE) then + ! 
Nothing to do here - the opts are already reset to defaults above + else if(iosystem%rearr_opts%fcd == PIO_REARR_COMM_FC_1D_COMP2IO) then + iosystem%rearr_opts%comm_fc_opts_comp2io%enable_hs = enable_hs_c2i + iosystem%rearr_opts%comm_fc_opts_comp2io%enable_isend = enable_isend_c2i + iosystem%rearr_opts%comm_fc_opts_comp2io%max_pend_req = max_pend_req_c2i + else if(iosystem%rearr_opts%fcd == PIO_REARR_COMM_FC_1D_IO2COMP) then + iosystem%rearr_opts%comm_fc_opts_io2comp%enable_hs = enable_hs_i2c + iosystem%rearr_opts%comm_fc_opts_io2comp%enable_isend = enable_isend_i2c + iosystem%rearr_opts%comm_fc_opts_io2comp%max_pend_req = max_pend_req_i2c + else if(iosystem%rearr_opts%fcd == PIO_REARR_COMM_FC_2D_ENABLE) then + iosystem%rearr_opts%comm_fc_opts_comp2io%enable_hs = enable_hs_c2i + iosystem%rearr_opts%comm_fc_opts_comp2io%enable_isend = enable_isend_c2i + iosystem%rearr_opts%comm_fc_opts_comp2io%max_pend_req = max_pend_req_c2i + + iosystem%rearr_opts%comm_fc_opts_io2comp%enable_hs = enable_hs_i2c + iosystem%rearr_opts%comm_fc_opts_io2comp%enable_isend = enable_isend_i2c + iosystem%rearr_opts%comm_fc_opts_io2comp%max_pend_req = max_pend_req_i2c + else + call piodie(__PIO_FILE__,__LINE__, "Invalid flow control dir specified") + end if + else + call piodie(__PIO_FILE__,__LINE__, "Invalid comm type specified") + end if + + end function PIO_set_rearr_opts !> !! @public From 14f45291bf68ee206df39236b65a9134a6fd8e18 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Mon, 10 Apr 2017 13:06:32 -0600 Subject: [PATCH 011/219] Zero the x2g fields if .not. glcrun_avg_alarm In principle we shouldn't need to zero the fields at these times - instead, glc should just ignore the fields at these times. However, some tests (like an ERS or ERI test that stops the final run segment mid-year) can fail if we don't explicitly zero the fields, because the x2g fields can then differ upon restart. The refactored logic in cesm_comp_mod also results in these changes: (1) This code: !---------------------------------------------------- !| cpl -> glc !---------------------------------------------------- if (iamin_CPLALLGLCID .and. glc_prognostic) then call component_exch(glc, flow='x2c', & infodata=infodata, infodata_string='cpl2glc_run', & mpicom_barrier=mpicom_CPLALLGLCID, run_barriers=run_barriers, & timer_barrier='CPL:C2G_BARRIER', timer_comp_exch='CPL:C2G', & timer_map_exch='CPL:c2g_glcx2glcg', timer_infodata_exch='CPL:c2g_infoexch') endif will now only be executed if glcrun_alarm is true. That's what happened on master, but the previous version of the branch executed it even if glcrun_alarm was false. (2) This code: call cesm_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) and then: if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) will happen even if .not. 
glcrun_avg_alarm --- src/drivers/mct/main/cesm_comp_mod.F90 | 73 ++++++++++++++------------ src/drivers/mct/main/prep_glc_mod.F90 | 27 ++++++++++ 2 files changed, 66 insertions(+), 34 deletions(-) diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index 0bd192a873bf..da8414d1a12e 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -2146,6 +2146,16 @@ subroutine cesm_run() barrier_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_barrier) pause_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_pause) + ! Check alarm consistency + if (glcrun_avg_alarm .and. .not. glcrun_alarm) then + write(logunit,*) 'ERROR: glcrun_avg_alarm is true, but glcrun_alarm is false' + write(logunit,*) 'Make sure that NCPL_BASE_PERIOD, GLC_NCPL and GLC_AVG_PERIOD' + write(logunit,*) 'are set so that glc averaging only happens at glc coupling times.' + write(logunit,*) '(It is allowable for glc coupling to be more frequent than glc averaging,' + write(logunit,*) 'but not for glc averaging to be more frequent than glc coupling.)' + call shr_sys_abort(subname//' glcrun_avg_alarm is true, but glcrun_alarm is false') + end if + ! Determine wich components need to write pause (restart) files if (pause_alarm) then if (trim(pause_component_list) == 'all') then @@ -2925,50 +2935,45 @@ subroutine cesm_run() !| GLC SETUP-SEND !---------------------------------------------------------- - ! NOTE - only create appropriate input to glc if the avg_alarm is on - if (glc_present) then - if (glcrun_avg_alarm) then - - if (.not. glcrun_alarm) then - write(logunit,*) 'ERROR: glcrun_avg_alarm is true, but glcrun_alarm is false' - write(logunit,*) 'Make sure that NCPL_BASE_PERIOD, GLC_NCPL and GLC_AVG_PERIOD' - write(logunit,*) 'are set so that glc averaging only happens at glc coupling times.' - write(logunit,*) '(It is allowable for glc coupling to be more frequent than glc averaging,' - write(logunit,*) 'but not for glc averaging to be more frequent than glc coupling.)' - call shr_sys_abort(subname//' glcrun_avg_alarm is true, but glcrun_alarm is false') - end if + if (glc_present .and. glcrun_alarm) then + + !---------------------------------------------------- + !| glc prep-merge + !---------------------------------------------------- - !---------------------------------------------------- - !| glc prep-merge - !---------------------------------------------------- + if (iamin_CPLID .and. glc_prognostic) then + call cesm_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') + call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) + if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (glc_prognostic) then - if (iamin_CPLID) then - call cesm_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') - call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) + if (lnd_c2_glc) then + ! NOTE - only create appropriate input to glc if the avg_alarm is on + if (glcrun_avg_alarm) then + call prep_glc_accum_avg(timer='CPL:glcprep_avg') - if (lnd_c2_glc) then - call prep_glc_accum_avg(timer='CPL:glcprep_avg') + ! Note that l2x_gx is obtained from mapping the module variable l2gacc_lx + call prep_glc_calc_l2x_gx(fractions_lx, timer='CPL:glcprep_lnd2glc') - ! 
Note that l2x_gx is obtained from mapping the module variable l2gacc_lx - call prep_glc_calc_l2x_gx(fractions_lx, timer='CPL:glcprep_lnd2glc') + call prep_glc_mrg(infodata, fractions_gx, timer_mrg='CPL:glcprep_mrgx2g') - call prep_glc_mrg(infodata, fractions_gx, timer_mrg='CPL:glcprep_mrgx2g') + call component_diag(infodata, glc, flow='x2c', comment='send glc', & + info_debug=info_debug, timer_diag='CPL:glcprep_diagav') - call component_diag(infodata, glc, flow='x2c', comment='send glc', & - info_debug=info_debug, timer_diag='CPL:glcprep_diagav') - endif + else + call prep_glc_zero_fields() + end if ! glcrun_avg_alarm + end if ! lnd_c2_glc - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) + if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) + call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) - end if ! iamin_CPLID + end if ! iamin_CPLID .and. glc_prognostic + ! Set the infodata field on all tasks (not just those with iamin_CPLID). + if (glc_prognostic) then + if (glcrun_avg_alarm) then call seq_infodata_PutData(infodata, glc_valid_input=.true.) - end if ! glc_prognostic - else ! .not. glcrun_avg_alarm - if (glc_prognostic) then + else call seq_infodata_PutData(infodata, glc_valid_input=.false.) end if end if diff --git a/src/drivers/mct/main/prep_glc_mod.F90 b/src/drivers/mct/main/prep_glc_mod.F90 index 0bf96b83b0a8..52eeb02bfa1d 100644 --- a/src/drivers/mct/main/prep_glc_mod.F90 +++ b/src/drivers/mct/main/prep_glc_mod.F90 @@ -32,6 +32,8 @@ module prep_glc_mod public :: prep_glc_calc_l2x_gx + public :: prep_glc_zero_fields + public :: prep_glc_get_l2x_gx public :: prep_glc_get_l2gacc_lx public :: prep_glc_get_l2gacc_lx_cnt @@ -444,6 +446,31 @@ end subroutine prep_glc_map_one_field_lnd2glc !================================================================================================ + subroutine prep_glc_zero_fields() + + !--------------------------------------------------------------- + ! Description + ! Set glc inputs to zero + ! + ! This is appropriate during time intervals when we're not sending valid data to glc. + ! In principle we shouldn't need to zero the fields at these times (instead, glc + ! should just ignore the fields at these times). However, some tests (like an ERS or + ! ERI test that stops the final run segment mid-year) can fail if we don't explicitly + ! zero the fields, because these x2g fields can then differ upon restart. + + ! Local Variables + integer :: egi + type(mct_avect), pointer :: x2g_gx + !--------------------------------------------------------------- + + do egi = 1,num_inst_glc + x2g_gx => component_get_x2c_cx(glc(egi)) + call mct_aVect_zero(x2g_gx) + end do + end subroutine prep_glc_zero_fields + + !================================================================================================ + function prep_glc_get_l2x_gx() type(mct_aVect), pointer :: prep_glc_get_l2x_gx(:) prep_glc_get_l2x_gx => l2x_gx(:) From 9b6df0f6589c9c65c2c33d6a0a502c1ed16f92c4 Mon Sep 17 00:00:00 2001 From: jayeshkrishna Date: Tue, 11 Apr 2017 13:18:48 -0500 Subject: [PATCH 012/219] Export pio_set_rearr_opts Making pio_set_rearr_opts public in the pio module --- pio/pio.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pio/pio.F90 b/pio/pio.F90 index e1070be8f935..b1627850b070 100644 --- a/pio/pio.F90 +++ b/pio/pio.F90 @@ -12,7 +12,7 @@ module pio ! 
only pio_offset is intended for export from kinds use pio_kinds, only : pio_offset - use piolib_mod, only : pio_initdecomp, & + use piolib_mod, only : pio_initdecomp, pio_set_rearr_opts, & pio_openfile, pio_closefile, pio_createfile, pio_setdebuglevel, & pio_seterrorhandling, pio_setframe, pio_init, pio_get_local_array_size, & pio_freedecomp, pio_syncfile,pio_numtowrite,pio_numtoread,pio_setiotype, & From c01a9062677f7860232b0f7280c3f4fe149c235a Mon Sep 17 00:00:00 2001 From: jayeshkrishna Date: Fri, 7 Apr 2017 12:22:19 -0500 Subject: [PATCH 013/219] Bring changes from ESMCI/cime to pio1 These changes were made in ESMCI/cime/src/externals/pio1/* instead of the pio1 branch. This commit includes changes in, * ESMCI/cime: #a28d68ae , PR #1177 * ESMCI/cime: #30c6e5640a92c688e2262687b858b3457ff951ac : PIO1 needs to use the findNetCDF.cmake from pio2 --- pio/CMakeLists.txt | 53 ++++++++++--------- pio/pionfput_mod.F90.in | 113 ++++++++++++++++++++-------------------- 2 files changed, 85 insertions(+), 81 deletions(-) diff --git a/pio/CMakeLists.txt b/pio/CMakeLists.txt index a11d679b906c..3b10c1a2cbda 100644 --- a/pio/CMakeLists.txt +++ b/pio/CMakeLists.txt @@ -1,4 +1,4 @@ -IF( NOT GENF90_PATH) +IF( NOT GENF90_PATH) SET (GENF90_PATH ${CMAKE_CURRENT_SOURCE_DIR}/bin) ENDIF() @@ -9,9 +9,8 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8.5) IF (USER_CMAKE_MODULE_PATH) SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${USER_CMAKE_MODULE_PATH}) ELSE() - SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") + SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../pio2/cmake") ENDIF() - find_file( TESTFILE NAMES TryCSizeOf.f90 PATHS ${CMAKE_MODULE_PATH} NO_DEFAULT_PATH) get_filename_component( TESTFILEPATH ${TESTFILE} PATH) @@ -21,7 +20,7 @@ SET(PIO_LIB_DIR ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} CACHE S #SET(bld_PIO_DEFINITIONS) -TRY_COMPILE(WITH_CSIZEOF ${CMAKE_CURRENT_BINARY_DIR}/tryCompileCSIZEOF +TRY_COMPILE(WITH_CSIZEOF ${CMAKE_CURRENT_BINARY_DIR}/tryCompileCSIZEOF ${TESTFILEPATH}/TryCSizeOf.f90) #MESSAGE(STATUS "c_sizeof test ${WITH_CSIZEOF}") IF(${WITH_CSIZEOF} STREQUAL FALSE) @@ -31,38 +30,40 @@ endif() # Netcdf is required +SET (CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../pio2/cmake" ${CMAKE_MODULE_PATH}) + #SET (NETCDF_FIND_COMPONENTS F90) -FIND_PACKAGE(NETCDF_Fortran REQUIRED) -IF (${NETCDF_Fortran_FOUND}) - MESSAGE("Building PIO with netcdf support ") - SET(pio_include_dirs_ ${pio_include_dirs_} ${NETCDF_Fortran_INCLUDE_DIR}) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} ${NETCDF_Fortran_DEFINITIONS}) +FIND_PACKAGE(NetCDF "4.3.3" COMPONENTS C Fortran) +IF (${NetCDF_Fortran_FOUND}) + MESSAGE("Building PIO with netcdf support ") + SET(pio_include_dirs_ ${pio_include_dirs_} ${NetCDF_Fortran_INCLUDE_DIR}) + SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} ${NetCDF_Fortran_DEFINITIONS}) +ELSE() + MESSAGE("Building PIO without netcdf support ${NetCDF_C_FOUND} ${NetCDF_Fortran_FOUND}") ENDIF () -# SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NETCDF4) - # PNetcdf is optional but used by default -OPTION(WITH_PNETCDF "Whether to build with PNetcdf" TRUE) +OPTION(WITH_PNETCDF "Whether to build with PnetCDF" TRUE) IF (${WITH_PNETCDF}) - FIND_PACKAGE(Pnetcdf REQUIRED) + FIND_PACKAGE(PnetCDF REQUIRED) ELSE () MESSAGE(WARNING "Warning: Not building with PNetcdf - cannot run all regression tests.") ENDIF () OPTION(PIO_BIG_ENDIAN "Specify that the machine is big endian" test) -IF ("${PIO_BIG_ENDIAN}" STREQUAL "test") +IF ("${PIO_BIG_ENDIAN}" 
STREQUAL "test") INCLUDE(TestBigEndian) TestBigEndian(PIO_BIG_ENDIAN_TEST) - IF(PIO_BIG_ENDIAN_TEST) + IF(PIO_BIG_ENDIAN_TEST) SET(PIO_BIG_ENDIAN ON CACHE BOOL "") ELSE() SET(PIO_BIG_ENDIAN OFF CACHE BOOL "") ENDIF() ELSE() SET(PIO_BIG_ENDIAN ${PIO_BIG_ENDIAN} CACHE BOOL "") -ENDIF() +ENDIF() IF (PIO_FILESYSTEM_HINTS STREQUAL "lustre") @@ -74,23 +75,25 @@ ELSEIF(PIO_FILESYSTEM_HINTS STREQUAL "gpfs") ELSEIF(NOT "${PIO_FILESYSTEM_HINTS}" STREQUAL "") MESSAGE(WARNING "${PIO_FILESYSTEM_HINTS} not valid option for PIO_FILESYSTEM_HINTS; use gpfs or lustre.") ENDIF() - -IF(NETCDF_C_FOUND) - SET(pio_include_dirs_ ${pio_include_dirs_} ${NETCDF_C_INCLUDE_DIR}) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} ${NETCDF_C_DEFINITIONS}) +IF(NetCDF_Fortran_FOUND) + SET(pio_include_dirs_ ${pio_include_dirs_} ${NetCDF_Fortran_INCLUDE_DIR}) + SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NETCDF ${NetCDF_Fortran_DEFINITIONS}) + if (${NetCDF_C_HAS_PARALLEL}) + SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NETCDF4) + ENDIF() ELSE() SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NONETCDF) ENDIF() -IF(PNETCDF_FOUND) - SET(pio_include_dirs_ ${pio_include_dirs_} ${PNETCDF_INCLUDE_DIR}) +IF(PnetCDF_C_FOUND) + SET(pio_include_dirs_ ${pio_include_dirs_} ${PNetCDF_INCLUDE_DIR}) SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_PNETCDF) ELSE() SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NOPNETCDF) ENDIF() OPTION(PIO_USE_MPIIO "Enable support for MPI-IO (default auto detect)" ON) -IF (PIO_USE_MPIIO) - TRY_COMPILE(MPIIO_SUCCESS ${CMAKE_CURRENT_BINARY_DIR}/tryCompileMPIIO +IF (PIO_USE_MPIIO) + TRY_COMPILE(MPIIO_SUCCESS ${CMAKE_CURRENT_BINARY_DIR}/tryCompileMPIIO ${TESTFILEPATH}/TryMPIIO.f90) IF (${MPIIO_SUCCESS}) MESSAGE(STATUS "MPIIO detected and enabled.") @@ -99,7 +102,7 @@ IF (PIO_USE_MPIIO) SET(PIO_USE_MPIIO FALSE) ENDIF() ENDIF() -IF (${PIO_USE_MPIIO}) +IF (${PIO_USE_MPIIO}) SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -DUSEMPIIO) ENDIF() diff --git a/pio/pionfput_mod.F90.in b/pio/pionfput_mod.F90.in index 6d8e883d9260..1ce9353ba2aa 100644 --- a/pio/pionfput_mod.F90.in +++ b/pio/pionfput_mod.F90.in @@ -1,6 +1,6 @@ #define __PIO_FILE__ "pionfput_mod.F90" !> -!! @file +!! @file !! $Revision$ !! $LastChangedDate$ !! @brief Write Routines for non-decomposed NetCDF data. @@ -17,7 +17,7 @@ module pionfput_mod use pio_utils, only : check_netcdf use pio_msg_mod - use pio_support, only : Debug, DebugIO, piodie + use pio_support, only : Debug, DebugIO, piodie #ifdef _NETCDF use netcdf ! _EXTERNAL #endif @@ -45,10 +45,10 @@ module pionfput_mod !! @defgroup PIO_put_var PIO_put_var !! @brief Writes netcdf metadata to a file !! @details The put_var interface is provided as a simplified interface to -!! write variables to a netcdf format file. -!! @warning Although this is a collective call the variable is written from the +!! write variables to a netcdf format file. +!! @warning Although this is a collective call the variable is written from the !! root IO task, no consistancy check is made with data passed on other tasks. -!! +!! !< public :: put_var interface put_var @@ -67,7 +67,7 @@ contains !! @details !! @param File @copydoc file_desc_t !! @param varid : The netcdf variable identifier -!! @param index : +!! @param index : !! @param ival : The value for the netcdf metadata !! 
@retval ierr @copydoc error_return !< @@ -82,9 +82,9 @@ contains #ifdef TIMING call t_startf("PIO:pio_put_var1_text") -#endif +#endif ierr=PIO_NOERR - iotype = File%iotype + iotype = File%iotype if(debug) print *,__PIO_FILE__,__LINE__,ival,iotype, index ios=>File%iosystem @@ -118,12 +118,12 @@ contains ! count(:) = 0 ! end if - select case (iotype) + select case (iotype) #ifdef _PNETCDF case(pio_iotype_pnetcdf) !#ifdef USE_INDEP_WRITE ierr = nfmpi_begin_indep_data(File%fh) - + if(Ios%io_rank==0 .and. (ierr==NF_EINDEP .or. ierr==PIO_NOERR)) then print *,__PIO_FILE__,__LINE__,index,count,trim(ival) ierr = nfmpi_put_vara (File%fh, varid, int(index,kind=PIO_OFFSET), & @@ -151,7 +151,7 @@ contains if (Ios%io_rank == 0) then ierr = nf90_put_var(File%fh, varid, ival, start=index) end if -#endif +#endif case default print *,__PIO_FILE__,__LINE__,iotype call piodie(__PIO_FILE__,__LINE__,"bad iotype specified") @@ -162,7 +162,7 @@ contains #ifdef TIMING call t_stopf("PIO:pio_put_var1_text") -#endif +#endif end function put_var1_text ! TYPE int,real,double !> @@ -172,7 +172,7 @@ contains !! @details !! @param File @copydoc file_desc_t !! @param varid : The netcdf variable identifier -!! @param index : +!! @param index : !! @param ival : The value for the netcdf metadata !! @retval ierr @copydoc error_return !< @@ -187,9 +187,9 @@ contains #ifdef TIMING call t_startf("PIO:pio_put_var1_{TYPE}") -#endif +#endif ierr=PIO_NOERR - iotype = File%iotype + iotype = File%iotype if(debug) print *,__PIO_FILE__,__LINE__,ival,iotype, index ios=>File%iosystem @@ -213,7 +213,7 @@ contains if(Ios%IOProc) then - select case (iotype) + select case (iotype) #ifdef _PNETCDF case(pio_iotype_pnetcdf) allocate(count(size(index))) @@ -239,7 +239,7 @@ contains #endif #ifdef _NETCDF #ifdef _NETCDF4 - case (pio_iotype_netcdf4p) + case (pio_iotype_netcdf4p) ierr=nf90_var_par_access(File%fh, varid, NF90_COLLECTIVE) ierr = nf90_put_var(File%fh, varid, ival, start=index) #endif @@ -258,7 +258,7 @@ contains #ifdef TIMING call t_stopf("PIO:pio_put_var1_{TYPE}") -#endif +#endif end function put_var1_{TYPE} !> @@ -268,7 +268,7 @@ contains !! @details !! @param File @copydoc file_desc_t !! @param vardesc @copydoc var_desc_t -!! @param start : +!! @param start : !! @param ival : The value for the netcdf metadata !! @retval ierr @copydoc error_return !< @@ -305,13 +305,13 @@ contains integer :: start({DIMS}+1), count({DIMS}+1) #ifdef TIMING call t_startf("PIO:pio_put_var_{DIMS}d_text") -#endif +#endif ierr=PIO_NOERR - iotype = File%iotype + iotype = File%iotype start = 1 count = 0 - is=0 + is=0 ios=>File%iosystem @@ -334,7 +334,7 @@ contains endif if(ios%async_interface ) then -#if({DIMS}==0) +#if({DIMS}==0) call MPI_BCAST(ival,len_trim(ival),MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) #else call MPI_BCAST(ival,size(ival),MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) @@ -351,7 +351,7 @@ contains end do #endif end if - select case (iotype) + select case (iotype) #ifdef _PNETCDF case(pio_iotype_pnetcdf) if(ios%io_rank>0) count = 0 @@ -365,7 +365,7 @@ contains ! This is a workaround for a bug in the netcdf f90 interface ! The netcdf bug is that when you use nf90_put_var ! to write a scalar string the trailing blanks are stripped by the specific -! function nf90_put_var_text before it calls nf_put_vars_text. +! function nf90_put_var_text before it calls nf_put_vars_text. ! if (Ios%io_rank == 0) then ! ierr = nf_put_vars_text(File%fh, varid, (/1/), (/len(ival)/), (/1/), ival) ! else @@ -382,7 +382,7 @@ contains ! 
This is a workaround for a bug in the netcdf f90 interface ! The netcdf bug is that when you use nf90_put_var ! to write a scalar string the trailing blanks are stripped by the specific -! function nf90_put_var_text before it calls nf_put_vars_text. +! function nf90_put_var_text before it calls nf_put_vars_text. ierr = nf_put_vars_text(File%fh, varid, (/1/), (/len(ival)/), (/1/), ival) #else ierr = nf90_put_var(File%fh, varid, ival) @@ -399,7 +399,7 @@ contains call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) #ifdef TIMING call t_stopf("PIO:pio_put_var_{DIMS}d_text") -#endif +#endif end function put_var_{DIMS}d_text ! DIMS 1,2,3,4,5 @@ -429,10 +429,10 @@ contains ierr=PIO_NOERR - iotype = File%iotype + iotype = File%iotype start = 1 count = 0 - is=0 + is=0 #ifdef _PNETCDF if(iotype == pio_iotype_pnetcdf) then @@ -445,7 +445,7 @@ contains #endif #ifdef TIMING call t_startf("PIO:pio_put_var_{DIMS}d_{TYPE}") -#endif +#endif ios=>File%iosystem if(ios%async_interface .and. .not. ios%ioproc ) then @@ -471,7 +471,7 @@ contains count(i+is) = size(ival,i) end do end if - select case (iotype) + select case (iotype) #ifdef _PNETCDF case(pio_iotype_pnetcdf) if(Ios%io_rank>0) count=0 @@ -497,7 +497,7 @@ contains call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) #ifdef TIMING call t_stopf("PIO:pio_put_var_{DIMS}d_{TYPE}") -#endif +#endif end function put_var_{DIMS}d_{TYPE} ! TYPE int,real,double @@ -523,14 +523,14 @@ contains ierr=PIO_NOERR - iotype = File%iotype + iotype = File%iotype start = 1 count = 1 - is=0 + is=0 #ifdef TIMING call t_startf("PIO:pio_put_var_0d_{TYPE}") -#endif +#endif ios=>File%iosystem if(ios%async_interface .and. .not. ios%ioproc ) then @@ -547,7 +547,7 @@ contains end if if(Ios%IOProc) then - select case (iotype) + select case (iotype) #ifdef _PNETCDF case(pio_iotype_pnetcdf) if(Ios%io_rank>0) count=0 @@ -573,7 +573,7 @@ contains call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) #ifdef TIMING call t_stopf("PIO:pio_put_var_0d_{TYPE}") -#endif +#endif end function put_var_0d_{TYPE} ! DIMS 0,1,2,3,4,5 @@ -624,10 +624,10 @@ contains integer :: dims({DIMS}), xlen, itype, slen #ifdef TIMING call t_startf("PIO:pio_put_vara_{DIMS}d_text") -#endif +#endif ndims=0 ierr=0 - iotype = File%iotype + iotype = File%iotype ios=>File%iosystem xlen = len(ival) if(.not. ios%async_interface .or. .not. ios%ioproc ) then @@ -657,24 +657,24 @@ contains call MPI_BCAST(xlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) endif - if(ios%async_interface ) then + if(ios%async_interface ) then call MPI_BCAST(ndims,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) call MPI_BCAST(ival,xlen*size(ival),MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) end if - + if(Ios%IOProc) then allocate(pstart(ndims),pcount(ndims)) if(Ios%io_rank==0) then pstart = start(1:ndims) pcount = count(1:ndims) else - pstart=1 ! avoids an unessasary pnetcdf error + pstart=1 ! avoids an unessasary pnetcdf error pcount=0 endif - select case (iotype) + select case (iotype) #ifdef _PNETCDF case(pio_iotype_pnetcdf) clen=count(1) @@ -697,9 +697,10 @@ contains #endif #endif #ifdef _NETCDF -! case(pio_iotype_netcdf4p) -! ierr = nf90_put_var(File%fh, varid, ival, start=int(pstart), count=int(pcount)) - case(pio_iotype_netcdf, pio_iotype_netcdf4c, pio_iotype_netcdf4p) + case(pio_iotype_netcdf4p) + ierr=nf90_var_par_access(File%fh, varid, NF90_COLLECTIVE) + ierr = nf90_put_var(File%fh, varid, ival, start=int(pstart), count=int(pcount)) + case(pio_iotype_netcdf, pio_iotype_netcdf4c) ! 
Only io proc 0 will do writing if (Ios%io_rank == 0) then ierr = nf90_put_var(File%fh, varid, ival, start=int(pstart), count=int(pcount)) @@ -715,7 +716,7 @@ contains #ifdef TIMING call t_stopf("PIO:pio_put_vara_{DIMS}d_{TYPE}") -#endif +#endif end function put_vara_{DIMS}d_text ! TYPE int,real,double ! DIMS 1,2,3,4,5 @@ -745,9 +746,9 @@ contains integer :: dims({DIMS}), xlen, itype, slen #ifdef TIMING call t_startf("PIO:pio_put_vara_{DIMS}d_{TYPE}") -#endif +#endif ierr=0 - iotype = File%iotype + iotype = File%iotype ios=>File%iosystem xlen=1 if(debug) print *,__PIO_FILE__,__LINE__,varid, iotype, start, count @@ -774,15 +775,15 @@ contains #endif endif - if(ios%async_interface ) then + if(ios%async_interface ) then call MPI_BCAST(ndims,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) call MPI_BCAST(ival,xlen*size(ival),{MPITYPE},ios%CompMaster, ios%my_comm , mpierr) end if - + if(Ios%IOProc) then - select case (iotype) + select case (iotype) #ifdef _PNETCDF case(pio_iotype_pnetcdf) allocate(pstart(ndims),pcount(ndims)) @@ -790,7 +791,7 @@ contains pstart = start(1:ndims) pcount = count(1:ndims) else - pstart=1 ! avoids an unessasary pnetcdf error + pstart=1 ! avoids an unessasary pnetcdf error pcount=0 endif @@ -835,7 +836,7 @@ contains #ifdef TIMING call t_stopf("PIO:pio_put_vara_{DIMS}d_{TYPE}") -#endif +#endif end function put_vara_{DIMS}d_{TYPE} ! DIMS 1,2,3,4,5 @@ -846,8 +847,8 @@ contains !! @details !! @param File @copydoc file_desc_t !! @param vardesc @copydoc var_desc_t -!! @param start : -!! @param count : +!! @param start : +!! @param count : !! @param ival : The value for the netcdf metadata !! @retval ierr @copydoc error_return !< From 04f99af06a7248ebf5f3c4bb6d789b7f85585911 Mon Sep 17 00:00:00 2001 From: jayeshkrishna Date: Tue, 11 Apr 2017 11:00:32 -0500 Subject: [PATCH 014/219] Making sure that we look for PnetCDF fortran libs The previous version of cmake configure did not look for PnetCDF Fortran libraries. * Make sure that we look for NetCDF Fortran (C not required) libs * Make sure that we look for PnetCDF Fortran (by default only C libraries are looked up) libs * Also setting the PnetCDF fortran include paths correctly. Without the fix a standalone build for PIO1 fails because the configure never detected pnetcdf fortran libs. 
--- pio/CMakeLists.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pio/CMakeLists.txt b/pio/CMakeLists.txt index 3b10c1a2cbda..9779dec1a816 100644 --- a/pio/CMakeLists.txt +++ b/pio/CMakeLists.txt @@ -33,7 +33,7 @@ endif() SET (CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../pio2/cmake" ${CMAKE_MODULE_PATH}) #SET (NETCDF_FIND_COMPONENTS F90) -FIND_PACKAGE(NetCDF "4.3.3" COMPONENTS C Fortran) +FIND_PACKAGE(NetCDF "4.3.3" COMPONENTS Fortran) IF (${NetCDF_Fortran_FOUND}) MESSAGE("Building PIO with netcdf support ") SET(pio_include_dirs_ ${pio_include_dirs_} ${NetCDF_Fortran_INCLUDE_DIR}) @@ -46,7 +46,7 @@ ENDIF () # PNetcdf is optional but used by default OPTION(WITH_PNETCDF "Whether to build with PnetCDF" TRUE) IF (${WITH_PNETCDF}) - FIND_PACKAGE(PnetCDF REQUIRED) + FIND_PACKAGE(PnetCDF REQUIRED COMPONENTS Fortran) ELSE () MESSAGE(WARNING "Warning: Not building with PNetcdf - cannot run all regression tests.") ENDIF () @@ -84,8 +84,8 @@ IF(NetCDF_Fortran_FOUND) ELSE() SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NONETCDF) ENDIF() -IF(PnetCDF_C_FOUND) - SET(pio_include_dirs_ ${pio_include_dirs_} ${PNetCDF_INCLUDE_DIR}) +IF(PnetCDF_Fortran_FOUND) + SET(pio_include_dirs_ ${pio_include_dirs_} ${PnetCDF_Fortran_INCLUDE_DIRS}) SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_PNETCDF) ELSE() SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NOPNETCDF) From 09ed442a659131c0ed30208cc8ad6ffdb1691266 Mon Sep 17 00:00:00 2001 From: Steve Goldhaber Date: Thu, 16 Feb 2017 23:03:09 -0700 Subject: [PATCH 015/219] New method for setting and detecting pause alarms in component models Refactor access to components which are paused Pause works with the driver and at least one active component (POP). Active component (POP) able to correctly process resume signal. PRE test refactored to be more flexible. Find good restart variable to change for POP PRE test (PSURF_CUR). Modify code and scripts to error if NINST_ESP > 1. Fixed NCK test to not set NINST_ESP > 1 Added PRE test back to cime_developer. Fails with CIME_MODEL=acme. 
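For orientation before the diff, the new detection pattern introduced here (the
same calls appear in esp_comp_mct.F90 below) boils down to the following sketch;
the real code splits the two loops between the init and run phases, and the
lookup/query routines come from the additions to seq_timemgr_mod in this commit:

    subroutine example_query_pause_state(pause_sig)
      ! Translate component names into time-manager pause indices, then ask
      ! the time manager which pause alarms are currently active.
      use desp_comp_mod,   only : desp_num_comps, comp_names
      use seq_timemgr_mod, only : seq_timemgr_pause_component_index, &
                                  seq_timemgr_pause_component_active
      logical, intent(out) :: pause_sig(desp_num_comps)
      integer :: ind
      integer :: comp_index(desp_num_comps)

      do ind = 1, desp_num_comps
         ! In the real component this lookup is done once, in esp_init_mct
         comp_index(ind) = seq_timemgr_pause_component_index(comp_names(ind))
      end do
      do ind = 1, desp_num_comps
         ! ...and this query is made every time esp_run_mct is called
         pause_sig(ind) = seq_timemgr_pause_component_active(comp_index(ind))
      end do
    end subroutine example_query_pause_state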
--- scripts/lib/CIME/SystemTests/nck.py | 6 +- scripts/lib/CIME/SystemTests/pre.py | 69 ++- scripts/lib/CIME/case.py | 7 +- scripts/lib/CIME/case_setup.py | 3 + scripts/lib/update_acme_tests.py | 4 +- .../data_comps/desp/desp_comp_mod.F90 | 109 ++-- .../data_comps/desp/esp_comp_mct.F90 | 46 +- src/drivers/mct/cime_config/buildexe | 4 + src/drivers/mct/cime_config/buildnml | 24 + .../mct/cime_config/config_component.xml | 17 + .../cime_config/namelist_definition_drv.xml | 51 +- src/drivers/mct/main/cesm_comp_mod.F90 | 96 ++- src/drivers/mct/main/seq_io_mod.F90 | 7 +- src/drivers/mct/main/seq_rest_mod.F90 | 8 +- src/drivers/mct/shr/seq_comm_mct.F90 | 48 +- src/drivers/mct/shr/seq_infodata_mod.F90 | 566 ++++++++---------- src/drivers/mct/shr/seq_timemgr_mod.F90 | 309 ++++++++-- 17 files changed, 790 insertions(+), 584 deletions(-) diff --git a/scripts/lib/CIME/SystemTests/nck.py b/scripts/lib/CIME/SystemTests/nck.py index be474cbb4af4..e8c18caff179 100644 --- a/scripts/lib/CIME/SystemTests/nck.py +++ b/scripts/lib/CIME/SystemTests/nck.py @@ -46,7 +46,11 @@ def _case_one_setup(self): def _case_two_setup(self): for comp in self._comp_classes: - self._case.set_value("NINST_%s"%comp, 2) + if (comp == "ESP"): + self._case.set_value("NINST_%s"%comp, 1) + else: + self._case.set_value("NINST_%s"%comp, 2) + ntasks = self._case.get_value("NTASKS_%s"%comp) self._case.set_value("NTASKS_%s"%comp, ntasks*2) diff --git a/scripts/lib/CIME/SystemTests/pre.py b/scripts/lib/CIME/SystemTests/pre.py index 22bb3f3bb207..7917a24a9925 100644 --- a/scripts/lib/CIME/SystemTests/pre.py +++ b/scripts/lib/CIME/SystemTests/pre.py @@ -2,6 +2,7 @@ Implementation of the CIME pause/resume test: Tests having driver 'pause' (write cpl restart file) and 'resume' (read cpl restart file) without changing restart file. Compare to non-pause/resume run. +Test can also be run with other component combinations. """ @@ -19,8 +20,9 @@ class PRE(SystemTestsCompareTwo): ############################################################################### """ Implementation of the CIME pause/resume test: Tests having driver - 'pause' (write cpl restart file) and 'resume' (read cpl restart file) - without changing restart file. Compare to non-pause/resume run. + 'pause' (write cpl and/or other restart file(s)) and 'resume' + (read cpl and/or other restart file(s)) without changing restart + file. Compare to non-pause/resume run. 
""" ########################################################################### @@ -31,26 +33,35 @@ def __init__(self, case): run_two_suffix='pr', run_one_description='no pause/resume', run_two_description='pause/resume') + self._stopopt = 'ndays' + self._stopn = 5 + self._pausediv = 5 # Number of pause cycles per run ########################################################################### def _case_one_setup(self): ########################################################################### case_setup(self._case, test_mode=True, reset=True) + self._stopopt = self._case.get_value("STOP_OPTION") + self._stopn = self._case.get_value("STOP_N") ########################################################################### def _case_two_setup(self): ########################################################################### # Set up a pause/resume run - stopopt = self._case.get_value("STOP_OPTION") - stopn = self._case.get_value("STOP_N") - expect((stopn % 5) == 0, "ERROR: PRE test requires that STOP_N be divisible by five") - pausen = stopn / 5 - expect(pausen > 0, "ERROR: pause_n (%d) must be > 0, stop_n is %d"%(pausen, stopn)) - self._case.set_value("PAUSE_OPTION", stopopt) + self._case.set_value("STOP_OPTION", self._stopopt) + self._case.set_value("STOP_N", self._stopn) + if self._stopn > 3: + pausen = 2 + else: + pausen = 1 + # End if + + self._case.set_value("PAUSE_OPTION", self._stopopt) self._case.set_value("PAUSE_N", pausen) + comps = [ x.lower() for x in self._case.get_values("COMP_CLASSES") ] pcl = self._case.get_value("PAUSE_COMPONENT_LIST") - expect(pcl == "cpl", - "ERROR: PRE test expected PAUSE_COMPONENT_LIST = 'cpl', found '%s'"%pcl) + expect(pcl == "all" or set(pcl.split(':')).issubset(comps), + "PRE ERROR: Invalid PAUSE_COMPONENT_LIST, '%s'"%pcl) self._case.flush() @@ -65,37 +76,35 @@ def run_phase(self): self._activate_case2() rundir2 = self._case.get_value("RUNDIR") should_match = (self._case.get_value("DESP_MODE") == "NOCHANGE") + compare_ok = True pause_comps = self._case.get_value("PAUSE_COMPONENT_LIST") expect((pause_comps != 'none'), "Pause/Resume (PRE) test has no pause components") if pause_comps == 'all': pause_comps = self._case.get_values("COMP_CLASSES") else: pause_comps = pause_comps.split(':') - # End if + for comp in pause_comps: - restart_files_1 = glob.glob(os.path.join(rundir1, '*.%s.r.*'%comp)) - if len(restart_files_1) != 1: - logger.error("Wrong number of case1 %s restart files, %d", comp, len(restart_files_1)) - # End if - restart_files_2 = glob.glob(os.path.join(rundir2, '*.%s.r.*'%comp)) - if len(restart_files_2) != 5: - logger.error("Wrong number of case2 %s restart files, %d", comp, len(restart_files_2)) - # End if + comp_name = self._case.get_value('COMP_%s'%comp.upper()) + rname = '*.%s.r.*'%comp_name + restart_files_1 = glob.glob(os.path.join(rundir1, rname)) + expect((len(restart_files_1) > 0), "No case1 restart files for %s"%comp) + restart_files_2 = glob.glob(os.path.join(rundir2, rname)) + expect((len(restart_files_2) > len(restart_files_1)), + "No pause (restart) files found in case2 for %s"%comp) # Do cprnc of restart files. 
- rfile1 = restart_files_1[0] + rfile1 = restart_files_1[len(restart_files_1) - 1] # rfile2 has to match rfile1 (same time string) parts = os.path.basename(rfile1).split(".") glob_str = "*.%s"%".".join(parts[len(parts)-4:]) restart_files_2 = glob.glob(os.path.join(rundir2, glob_str)) - if len(restart_files_2) < 1: - logger.error("Missing case2 restart file, %s", glob_str) - # End if - if len(restart_files_2) > 1: - logger.error("Multiple case2 restart files, %s", glob_str) - # End if + expect((len(restart_files_2) == 1), + "Missing case2 restart file, %s", glob_str) rfile2 = restart_files_2[0] ok, out = cprnc(comp, rfile1, rfile2, self._case, rundir2) - logger.warning("CPRNC result: %s, file = %s"%(ok, out)) - expect((should_match == ok), - "%s restart files%s match"%(comp, " do not" if should_match else "")) - # End for + logger.warning("CPRNC result for %s: %s"%(os.path.basename(rfile1), "PASS" if (ok == should_match) else "FAIL")) + compare_ok = compare_ok and (should_match == ok) + + expect(compare_ok, + "Not all restart files %s"%("matched" if should_match else "failed to match")) + diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 99a7b9cb8a45..32543f9d6ebb 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -700,7 +700,12 @@ def configure(self, compset_name, grid_name, machine_name=None, if compclass == "CPL": continue key = "NINST_%s"%compclass - mach_pes_obj.set_value(key, ninst) + # ESP models are currently limited to 1 instance + if compclass == "ESP": + mach_pes_obj.set_value(key, 1) + else: + mach_pes_obj.set_value(key, ninst) + key = "NTASKS_%s"%compclass if key not in pes_ntasks.keys(): mach_pes_obj.set_value(key,1) diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index 94f3fccec876..a75843cecd66 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -127,6 +127,9 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, continue ninst = case.get_value("NINST_%s" % comp) ntasks = case.get_value("NTASKS_%s" % comp) + # ESP models are currently limited to 1 instance + expect((comp != "ESP") or (ninst == 1), + "ESP components may only have one instance") if ninst > ntasks: if ntasks == 1: case.set_value("NTASKS_%s" % comp, ninst) diff --git a/scripts/lib/update_acme_tests.py b/scripts/lib/update_acme_tests.py index 0e3b061281cc..cdfd7c670733 100644 --- a/scripts/lib/update_acme_tests.py +++ b/scripts/lib/update_acme_tests.py @@ -48,8 +48,8 @@ "ERP.f45_g37_rx1.A", "SMS_D_Ln9.f19_g16_rx1.A", "DAE.f19_f19.A", - "SMS.T42_T42.S") -# "PRE.f19_f19.ADESP") + "SMS.T42_T42.S", + "PRE.f19_f19.ADESP") ), # diff --git a/src/components/data_comps/desp/desp_comp_mod.F90 b/src/components/data_comps/desp/desp_comp_mod.F90 index ab86eb3c2419..d48b5e06a5d9 100644 --- a/src/components/data_comps/desp/desp_comp_mod.F90 +++ b/src/components/data_comps/desp/desp_comp_mod.F90 @@ -15,7 +15,8 @@ module desp_comp_mod use shr_strdata_mod, only: shr_strdata_type, shr_strdata_advance use shr_strdata_mod, only: shr_strdata_pioinit - use seq_timemgr_mod, only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn + use seq_timemgr_mod, only: seq_timemgr_EClockGetData + use seq_timemgr_mod, only: seq_timemgr_RestartAlarmIsOn use seq_comm_mct, only: seq_comm_inst, seq_comm_name, seq_comm_suffix implicit none @@ -30,39 +31,29 @@ module desp_comp_mod public :: desp_comp_final !-------------------------------------------------------------------------- -! Public data +! 
Public module data !-------------------------------------------------------------------------- - - integer, parameter, public :: atm_ind = 1 - integer, parameter, public :: lnd_ind = 2 - integer, parameter, public :: ice_ind = 3 - integer, parameter, public :: ocn_ind = 4 - integer, parameter, public :: glc_ind = 5 - integer, parameter, public :: rof_ind = 6 - integer, parameter, public :: wav_ind = 7 - integer, parameter, public :: cpl_ind = 8 - integer, parameter, public :: max_ind = cpl_ind + integer, public, parameter :: desp_num_comps = 8 + character(len=3), public, parameter :: comp_names(desp_num_comps) = & + (/ 'atm', 'lnd', 'ice', 'ocn', 'glc', 'rof', 'wav', 'drv' /) !-------------------------------------------------------------------------- ! Private data !-------------------------------------------------------------------------- - character(len=CS) :: myModelName = 'esp' ! user defined model name + character(len=CS) :: myModelName = 'esp' ! user defined model name integer(IN) :: mpicom - integer(IN) :: COMPID ! mct comp id - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer(IN) :: npes ! total number of tasks - integer(IN), parameter :: master_task=0 ! task number of master task - integer(IN) :: logunit ! logging unit number + integer(IN) :: COMPID ! mct comp id + integer(IN) :: my_task ! my task in mpi communicator mpicom + integer(IN) :: npes ! total number of tasks + integer(IN), parameter :: master_task=0 ! task number of master task + integer(IN) :: logunit ! logging unit number integer(IN) :: loglevel - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix ! char string associated with instance - ! (ie. "_0001" or "") - character(len=CL) :: desp_mode ! mode of operation - - character(len=3), parameter :: comp_names(8) = (/ 'atm', 'lnd', 'ice', & - 'ocn', 'glc', 'rof', 'wav', 'drv' /) + integer :: inst_index ! number of current instance (ie. 1) + character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") + character(len=16) :: inst_suffix ! char string associated with instance + ! (ie. "_0001" or "") + character(len=CL) :: desp_mode ! mode of operation character(len=*), parameter :: rpprefix = 'rpointer.' 
character(len=*), parameter :: rpfile = rpprefix//'esp' character(len=*), parameter :: nullstr = 'undefined' @@ -148,16 +139,10 @@ subroutine desp_comp_init(EClock, espid, mpicom_in, phase, read_restart, & desp_mode, info_debug, restfilm !--- formats --- - character(*), parameter :: F00 = "('(desp_comp_init) ',8a)" - character(*), parameter :: F0L = "('(desp_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(desp_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(desp_comp_init) ',a,4es13.6)" - character(*), parameter :: F03 = "('(desp_comp_init) ',a,i8,a)" - character(*), parameter :: F04 = "('(desp_comp_init) ',2a,2i8,'s')" - character(*), parameter :: F05 = "('(desp_comp_init) ',a,2f10.4)" - character(*), parameter :: F90 = "('(desp_comp_init) ',73('='))" - character(*), parameter :: F91 = "('(desp_comp_init) ',73('-'))" character(*), parameter :: subName = "(desp_comp_init) " + character(*), parameter :: F00 = "('"//subName//"',8a)" + character(*), parameter :: F01 = "('"//subName//"',a,5i8)" + character(*), parameter :: F04 = "('"//subName//"',2a,2i8,'s')" !------------------------------------------------------------------------------- call t_startf('DESP_INIT') @@ -219,8 +204,8 @@ subroutine desp_comp_init(EClock, espid, mpicom_in, phase, read_restart, & write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr call shr_sys_abort(subName//': namelist read error '//trim(filename)) end if - write(logunit,F01)' info_debug = ',info_debug - write(logunit,F00)' restfilm = ',trim(restfilm) + write(logunit,F01) 'info_debug = ',info_debug + write(logunit,F00) 'restfilm = ',trim(restfilm) write(logunit,F01) 'inst_index = ',inst_index write(logunit,F00) 'inst_name = ',trim(inst_name) write(logunit,F00) 'inst_suffix = ',trim(inst_suffix) @@ -360,7 +345,7 @@ subroutine desp_comp_run(EClock, case_name, pause_sig, atm_resume, & type(ESMF_Clock), intent(in) :: EClock character(len=*), intent(in) :: case_name - logical, intent(in) :: pause_sig(max_ind) + logical, intent(in) :: pause_sig(desp_num_comps) character(len=CL), intent(inout) :: atm_resume(num_inst_atm) character(len=CL), intent(inout) :: lnd_resume(num_inst_lnd) character(len=CL), intent(inout) :: rof_resume(num_inst_rof) @@ -445,45 +430,45 @@ subroutine desp_comp_run(EClock, case_name, pause_sig, atm_resume, & ! Find the active components and their restart files ! Note hard-coded variable names are just for testing. This could be ! 
changed if this feature comes to regular use - do ind = 1, max_ind + do ind = 1, desp_num_comps if (pause_sig(ind)) then - select case (ind) - case(atm_ind) + select case (comp_names(ind)) + case('atm') call get_restart_filenames(ind, atm_resume, errcode) allocate(rfilenames(size(atm_resume))) rfilenames = atm_resume varname = 'T' - case(lnd_ind) + case('lnd') call get_restart_filenames(ind, lnd_resume, errcode) allocate(rfilenames(size(lnd_resume))) rfilenames = lnd_resume varname = 'T' - case(ice_ind) + case('ice') call get_restart_filenames(ind, ice_resume, errcode) allocate(rfilenames(size(ice_resume))) rfilenames = ice_resume varname = 'T' - case(ocn_ind) + case('ocn') call get_restart_filenames(ind, ocn_resume, errcode) allocate(rfilenames(size(ocn_resume))) rfilenames = ocn_resume - varname = 'T' - case(glc_ind) + varname = 'PSURF_CUR' + case('glc') call get_restart_filenames(ind, glc_resume, errcode) allocate(rfilenames(size(glc_resume))) rfilenames = glc_resume varname = 'T' - case(rof_ind) + case('rof') call get_restart_filenames(ind, rof_resume, errcode) allocate(rfilenames(size(rof_resume))) rfilenames = rof_resume varname = 'T' - case(wav_ind) + case('wav') call get_restart_filenames(ind, wav_resume, errcode) allocate(rfilenames(size(wav_resume))) rfilenames = wav_resume varname = 'T' - case(cpl_ind) + case('drv') call get_restart_filenames(ind, cpl_resume, errcode) allocate(rfilenames(1)) rfilenames(1) = cpl_resume @@ -520,6 +505,9 @@ subroutine desp_comp_run(EClock, case_name, pause_sig, atm_resume, & write(logunit, *) subname, 'Found restart file ',trim(rfilenames(inst)) end if call esp_pio_modify_variable(COMPID, mpicom, rfilenames(inst), varname, var_found) + if (.not. var_found) then + call shr_sys_abort(subname//'Variable, '//trim(varname)//', not found on '//rfilenames(inst)) + end if end do case (null_mode) ! Since DESP is not 'present' for this mode, we should not get here. @@ -642,22 +630,22 @@ subroutine get_restart_filenames_a(comp_ind, filenames, retcode) filenames = ' ' num_inst = size(filenames) allocate(ids(num_inst)) - select case (comp_ind) - case(atm_ind) + select case (comp_names(comp_ind)) + case('atm') ids = ATMID - case(lnd_ind) + case('lnd') ids = LNDID - case(ice_ind) + case('ice') ids = ICEID - case(ocn_ind) + case('ocn') ids = OCNID - case(glc_ind) + case('glc') ids = GLCID - case(rof_ind) + case('rof') ids = ROFID - case(wav_ind) + case('wav') ids = WAVID - case(cpl_ind) + case('drv') ids = CPLID case default call shr_sys_abort(subname//'Unrecognized comp_ind') @@ -668,6 +656,11 @@ subroutine get_restart_filenames_a(comp_ind, filenames, retcode) rpointer_name = rpprefix//comp_names(comp_ind)//trim(seq_comm_suffix(ids(ind))) if (my_task == master_task) then inquire(file=rpointer_name, EXIST=file_exists) + ! POP decided to not follow the convention + if ((.not. file_exists) .and. (trim(comp_names(comp_ind)) == 'ocn')) then + rpointer_name = rpprefix//comp_names(comp_ind)//".restart"//trim(seq_comm_suffix(ids(ind))) + inquire(file=rpointer_name, EXIST=file_exists) + end if if (.not. file_exists) then retcode = NO_RPOINTER else diff --git a/src/components/data_comps/desp/esp_comp_mct.F90 b/src/components/data_comps/desp/esp_comp_mct.F90 index 549a1ce1027c..3f3c07a98874 100644 --- a/src/components/data_comps/desp/esp_comp_mct.F90 +++ b/src/components/data_comps/desp/esp_comp_mct.F90 @@ -2,10 +2,11 @@ module esp_comp_mct ! 
!USES: - use esmf, only: ESMF_Clock - use mct_mod, only: mct_aVect - use seq_cdata_mod, only: seq_cdata - use seq_infodata_mod, only: seq_infodata_type + use esmf, only: ESMF_Clock + use mct_mod, only: mct_aVect + use seq_cdata_mod, only: seq_cdata + use seq_infodata_mod, only: seq_infodata_type + use desp_comp_mod, only: desp_num_comps ! !PUBLIC TYPES: implicit none @@ -19,6 +20,11 @@ module esp_comp_mct public :: esp_run_mct public :: esp_final_mct + !-------------------------------------------------------------------------- + ! Private module data + !-------------------------------------------------------------------------- + integer :: comp_index(desp_num_comps) + !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONTAINS !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -36,9 +42,11 @@ module esp_comp_mct ! !INTERFACE: --------------------------------------------------------------- subroutine esp_init_mct(EClock, cdata, x2a, a2x, NLFilename) - use desp_comp_mod, only: desp_comp_init + use desp_comp_mod, only: desp_comp_init, comp_names use seq_cdata_mod, only: seq_cdata_setptrs use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData + use seq_timemgr_mod, only: seq_timemgr_pause_component_index + ! !INPUT/OUTPUT PARAMETERS: @@ -49,6 +57,7 @@ subroutine esp_init_mct(EClock, cdata, x2a, a2x, NLFilename) !EOP + integer :: ind integer :: ESPID integer :: mpicom_esp integer :: esp_phase @@ -73,6 +82,11 @@ subroutine esp_init_mct(EClock, cdata, x2a, a2x, NLFilename) call seq_infodata_PutData(infodata, & esp_present=esp_present, esp_prognostic=esp_prognostic) + ! Retrieve component indices from the time manager + do ind = 1, desp_num_comps + comp_index(ind) = seq_timemgr_pause_component_index(comp_names(ind)) + end do + end subroutine esp_init_mct !============================================================================ @@ -88,14 +102,14 @@ end subroutine esp_init_mct ! !INTERFACE: ------------------------------------------------------------------ subroutine esp_run_mct( EClock, cdata, x2a, a2x) + use shr_kind_mod, only: CL=>SHR_KIND_CL use seq_comm_mct, only: num_inst_atm, num_inst_lnd, num_inst_rof use seq_comm_mct, only: num_inst_ocn, num_inst_ice, num_inst_glc use seq_comm_mct, only: num_inst_wav - use desp_comp_mod, only: desp_comp_run, atm_ind, lnd_ind, ocn_ind - use desp_comp_mod, only: ice_ind, glc_ind, rof_ind, wav_ind, cpl_ind, max_ind + use desp_comp_mod, only: desp_comp_run use seq_cdata_mod, only: seq_cdata_setptrs use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData - use shr_kind_mod, only: CL=>SHR_KIND_CL + use seq_timemgr_mod, only: seq_timemgr_pause_component_active ! !INPUT/OUTPUT PARAMETERS: @@ -106,8 +120,9 @@ subroutine esp_run_mct( EClock, cdata, x2a, a2x) !EOP + integer :: ind type(seq_infodata_type), pointer :: infodata - logical :: pause_sig(max_ind) + logical :: pause_sig(desp_num_comps) character(len=CL) :: atm_resume(num_inst_atm) character(len=CL) :: lnd_resume(num_inst_lnd) character(len=CL) :: rof_resume(num_inst_rof) @@ -120,16 +135,13 @@ subroutine esp_run_mct( EClock, cdata, x2a, a2x) character(len=*), parameter :: subName = "(esp_run_mct) " !-------------------------------------------------------------------------- - ! Grab infodata + ! Grab infodata and case name call seq_cdata_setptrs(cdata, infodata=infodata) + call seq_infodata_GetData(infodata, case_name=case_name) ! Find out if we should be running - ! 
The data ESP component only runs during a pause alarm - call seq_infodata_GetData(infodata, atm_pause=pause_sig(atm_ind), & - lnd_pause=pause_sig(lnd_ind), ocn_pause=pause_sig(ocn_ind), & - ice_pause=pause_sig(ice_ind), glc_pause=pause_sig(glc_ind), & - rof_pause=pause_sig(rof_ind), wav_pause=pause_sig(wav_ind), & - cpl_pause=pause_sig(cpl_ind), case_name=case_name) - + do ind = 1, desp_num_comps + pause_sig(ind) = seq_timemgr_pause_component_active(comp_index(ind)) + end do call desp_comp_run(EClock, case_name, pause_sig, atm_resume, lnd_resume, & rof_resume, ocn_resume, ice_resume, glc_resume, wav_resume, & cpl_resume) diff --git a/src/drivers/mct/cime_config/buildexe b/src/drivers/mct/cime_config/buildexe index fbf07f1feb9a..627330611a92 100755 --- a/src/drivers/mct/cime_config/buildexe +++ b/src/drivers/mct/cime_config/buildexe @@ -31,8 +31,12 @@ def _main_func(): gmake = case.get_value("GMAKE") gmake_j = case.get_value("GMAKE_J") model = case.get_value("MODEL") + num_esp = case.get_value("NUM_COMP_INST_ESP") os.environ["PIO_VERSION"] = str(case.get_value("PIO_VERSION")) + expect((num_esp is None) or (int(num_esp) == 1), "ESP component restricted to one instance") + + with open('Filepath', 'w') as out: out.write(os.path.join(caseroot, "SourceMods", "src.drv") + "\n") out.write(os.path.join(cimeroot, "src", "drivers", "mct", "main") + "\n") diff --git a/src/drivers/mct/cime_config/buildnml b/src/drivers/mct/cime_config/buildnml index 71f687381414..0296d662e8c4 100755 --- a/src/drivers/mct/cime_config/buildnml +++ b/src/drivers/mct/cime_config/buildnml @@ -22,6 +22,20 @@ from CIME.XML.files import Files logger = logging.getLogger(__name__) +############################################################################### +def _get_time_in_seconds(time, unit): +############################################################################### + if 'nyear' in unit: + dmult = 365 * 24 * 3600 + elif 'nmonth' in unit: + dmult = 30 * 24 * 3600 + elif 'nday' in unit: + dmult = 24 * 3600 + else: + dmult = 1 + + return dmult * time + ############################################################################### def _create_drv_namelists(case, infile, confdir, nmlgen, files): ############################################################################### @@ -93,6 +107,7 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): expect(False, "basedt invalid overflow for NCPL_BASE_PERIOD %s " %ncpl_base_period) comps = case.get_values("COMP_CLASSES") + mindt = basedt for comp in comps: ncpl = case.get_value(comp.upper() + '_NCPL') if ncpl is not None: @@ -101,6 +116,7 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): if totaldt != basedt: expect(False, " %s ncpl doesn't divide base dt evenly" %comp) nmlgen.set_value(comp.lower() + '_cpl_dt', value=cpl_dt) + mindt = min(mindt, cpl_dt) # elif comp.lower() is not 'cpl': #-------------------------------- @@ -142,6 +158,7 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): # Set up the pause_component_list if pause is active pauseo = case.get_value('PAUSE_OPTION') if pauseo != 'never' and pauseo != 'none': + pausen = case.get_value('PAUSE_N') pcl = nmlgen.get_default('pause_component_list') nmlgen.add_default('pause_component_list', pcl) # Check to make sure pause_component_list is valid @@ -154,6 +171,13 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): "Invalid PAUSE_COMPONENT_LIST, %s is not a valid component type"%comp) # End for # End if + # Set esp interval + if 'nstep' in pauseo: + esp_time 
= mindt + else: + esp_time = _get_time_in_seconds(pausen, pauseo) + + nmlgen.set_value('esp_cpl_dt', value=esp_time) # End if pause is active #-------------------------------- diff --git a/src/drivers/mct/cime_config/config_component.xml b/src/drivers/mct/cime_config/config_component.xml index 8019d474bda1..8115287427a0 100644 --- a/src/drivers/mct/cime_config/config_component.xml +++ b/src/drivers/mct/cime_config/config_component.xml @@ -418,6 +418,23 @@ + + logical + TRUE,FALSE + FALSE + run_begin_stop_restart + env_run.xml + + ESP component runs after driver 'pause cycle' + If any component 'pauses' (see PAUSE_OPTION, PAUSE_N and + PAUSE_COMPONENT_LIST XML variables), the ESP component (if + present) will be run to process the component 'pause' (restart) + files and set any required 'resume' signals. + If true, esp_cpl_dt and esp_cpl_offset settings are ignored. + default: false + + + logical TRUE,FALSE diff --git a/src/drivers/mct/cime_config/namelist_definition_drv.xml b/src/drivers/mct/cime_config/namelist_definition_drv.xml index bffe785aef28..87d07229680f 100644 --- a/src/drivers/mct/cime_config/namelist_definition_drv.xml +++ b/src/drivers/mct/cime_config/namelist_definition_drv.xml @@ -1,6 +1,6 @@ - + @@ -352,7 +352,7 @@ - + logical expdef seq_infodata_inparm @@ -1497,6 +1497,23 @@ + + integer + time + seq_timemgr_inparm + + esp run interval in seconds + esp_cpl_dt is the number of times the esp is run per NCPL_BASE_PERIOD + NCPL_BASE_PERIOD is set in env_run.xml and is the base period + associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade + default value set by buildnml to be the pause interval if pause is active + otherwise, it is set to the shortest component coupling time + + + -999 + + + integer time @@ -1569,6 +1586,36 @@ + + integer + time + seq_timemgr_inparm + + esp coupling interval offset in seconds default: 0 + + + 0 + + + + + logical + time + seq_timemgr_inparm + + true => ESP component runs after driver 'pause cycle' + If any component 'pauses' (see PAUSE_OPTION, PAUSE_N and + PAUSE_COMPONENT_LIST XML variables), the ESP component (if + present) will be run to process the component 'pause' (restart) + files and set any required 'resume' signals. + If true, esp_cpl_dt and esp_cpl_offset settings are ignored. + default: true + + + .true. + + + char time diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index afc6b863ff4f..85291d5cf7f8 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -99,13 +99,15 @@ module cesm_comp_mod use seq_timemgr_mod, only: seq_timemgr_alarm_esprun use seq_timemgr_mod, only: seq_timemgr_alarm_barrier use seq_timemgr_mod, only: seq_timemgr_alarm_pause - use seq_timemgr_mod, only: pause_component_list=>seq_timemgr_pause_component_list + use seq_timemgr_mod, only: seq_timemgr_pause_active + use seq_timemgr_mod, only: seq_timemgr_pause_component_active + use seq_timemgr_mod, only: seq_timemgr_pause_component_index ! "infodata" gathers various control flags into one datatype use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData use seq_infodata_mod, only: seq_infodata_init, seq_infodata_exchange use seq_infodata_mod, only: seq_infodata_type, seq_infodata_orb_variable_year - use seq_infodata_mod, only: seq_infodata_print + use seq_infodata_mod, only: seq_infodata_print, seq_infodata_init2 ! 
domain related routines use seq_domain_mct, only : seq_domain_check @@ -257,7 +259,6 @@ module cesm_comp_mod logical :: esprun_alarm ! esp run alarm logical :: tprof_alarm ! timing profile alarm logical :: barrier_alarm ! barrier alarm - logical :: pause_alarm ! pause alarm logical :: t1hr_alarm ! alarm every hour logical :: t2hr_alarm ! alarm every two hours logical :: t3hr_alarm ! alarm every three hours @@ -265,6 +266,8 @@ module cesm_comp_mod logical :: t12hr_alarm ! alarm every twelve hours logical :: t24hr_alarm ! alarm every twentyfour hours logical :: t1yr_alarm ! alarm every year, at start of year + logical :: pause_alarm ! pause alarm + integer :: drv_index ! seq_timemgr index for driver real(r8) :: days_per_year = 365.0 ! days per year @@ -1002,6 +1005,11 @@ subroutine cesm_pre_init2() call seq_timemgr_clockPrint(seq_SyncClock) endif + !---------------------------------------------------------- + !| Initialize infodata items which need the clocks + !---------------------------------------------------------- + call seq_infodata_init2(infodata, GLOID) + call seq_infodata_getData(infodata, & orb_iyear=orb_iyear, & orb_iyear_align=orb_iyear_align, & @@ -1966,6 +1974,11 @@ subroutine cesm_init() call t_stopf('comp_init_cc_atm2') endif ! atm present + !---------------------------------------------------------- + !| Get time manager's index for driver + !---------------------------------------------------------- + drv_index = seq_timemgr_pause_component_index('drv') + !---------------------------------------------------------- !| Read driver restart file, overwrite anything previously sent or computed !---------------------------------------------------------- @@ -2052,8 +2065,8 @@ end subroutine cesm_init !=============================================================================== subroutine cesm_run() - use seq_comm_mct, only: atm_layout, lnd_layout, ice_layout, glc_layout, rof_layout, & - ocn_layout, wav_layout, esp_layout + use seq_comm_mct, only: atm_layout, lnd_layout, ice_layout, glc_layout, & + rof_layout, ocn_layout, wav_layout, esp_layout use shr_string_mod, only: shr_string_listGetIndexF ! gptl timer lookup variables @@ -2062,6 +2075,7 @@ subroutine cesm_run() ! Driver pause/resume logical :: drv_pause ! Driver writes pause restart file character(len=CL) :: drv_resume ! Driver resets state from restart file + integer :: iamroot_ESPID 101 format( A, 2i8, 12A, A, F8.2, A, F8.2 ) 102 format( A, 2i8, A, 8L3 ) @@ -2138,49 +2152,8 @@ subroutine cesm_run() barrier_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_barrier) pause_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_pause) - ! Determine wich components need to write pause (restart) files - if (pause_alarm) then - if (trim(pause_component_list) == 'all') then - drv_pause = .true. - call seq_infodata_putData(infodata, atm_pause=.true., & - lnd_pause=.true., ocn_pause=.true., ice_pause=.true., & - glc_pause=.true., rof_pause=.true., wav_pause=.true., & - cpl_pause=.true.) - else if (trim(pause_component_list) /= 'none') then - if (shr_string_listGetIndexF(pause_component_list, 'atm') > 0) then - call seq_infodata_putData(infodata, atm_pause=.true.) - end if - if (shr_string_listGetIndexF(pause_component_list, 'lnd') > 0) then - call seq_infodata_putData(infodata, lnd_pause=.true.) - end if - if (shr_string_listGetIndexF(pause_component_list, 'ocn') > 0) then - call seq_infodata_putData(infodata, ocn_pause=.true.) 
- end if - if (shr_string_listGetIndexF(pause_component_list, 'ice') > 0) then - call seq_infodata_putData(infodata, ice_pause=.true.) - end if - if (shr_string_listGetIndexF(pause_component_list, 'glc') > 0) then - call seq_infodata_putData(infodata, glc_pause=.true.) - end if - if (shr_string_listGetIndexF(pause_component_list, 'rof') > 0) then - call seq_infodata_putData(infodata, rof_pause=.true.) - end if - if (shr_string_listGetIndexF(pause_component_list, 'wav') > 0) then - call seq_infodata_putData(infodata, wav_pause=.true.) - end if - if ( (shr_string_listGetIndexF(pause_component_list, 'cpl') > 0) .or.& - (shr_string_listGetIndexF(pause_component_list, 'drv') > 0)) then - drv_pause = .true. - call seq_infodata_putData(infodata, cpl_pause=.true.) - end if - end if - else - drv_pause = .false. - call seq_infodata_putData(infodata, atm_pause=.false., & - lnd_pause=.false., ocn_pause=.false., ice_pause=.false., & - glc_pause=.false., rof_pause=.false., wav_pause=.false., & - cpl_pause=.false.) - end if ! pause alarm + ! Does the driver need to pause? + drv_pause = pause_alarm .and. seq_timemgr_pause_component_active(drv_index) ! this probably belongs in seq_timemgr somewhere using proper clocks t1hr_alarm = .false. @@ -3726,6 +3699,10 @@ subroutine cesm_run() comp_prognostic=esp_prognostic, comp_num=comp_num_esp, & timer_barrier= 'CPL:ESP_RUN_BARRIER', timer_comp_run='CPL:ESP_RUN', & run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=esp_layout) + !--------------------------------------------------------------------- + !| ESP computes resume options for other components -- update everyone + !--------------------------------------------------------------------- + call seq_infodata_exchange(infodata, CPLALLESPID, 'esp2cpl_run') endif !---------------------------------------------------------- @@ -3733,16 +3710,19 @@ subroutine cesm_run() !---------------------------------------------------------- call seq_infodata_GetData(infodata, cpl_resume=drv_resume) if (len_trim(drv_resume) > 0) then - if (iamroot_CPLID) then - write(logunit,103) subname,' Reading restart (resume) file ',trim(drv_resume) - call shr_sys_flush(logunit) - end if - if (iamin_CPLID) then - call seq_rest_read(drv_resume, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx) - end if + if (iamroot_CPLID) then + write(logunit,103) subname,' Reading restart (resume) file ',trim(drv_resume) + call shr_sys_flush(logunit) + end if + if (iamin_CPLID) then + call seq_rest_read(drv_resume, infodata, & + atm, lnd, ice, ocn, rof, glc, wav, esp, & + fractions_ax, fractions_lx, fractions_ix, fractions_ox, & + fractions_rx, fractions_gx, fractions_wx) + end if + ! Clear the resume file so we don't try to read it again + drv_resume = ' ' + call seq_infodata_PutData(infodata, cpl_resume=drv_resume) end if !---------------------------------------------------------- diff --git a/src/drivers/mct/main/seq_io_mod.F90 b/src/drivers/mct/main/seq_io_mod.F90 index ad8649a6c110..d27fc581c6ef 100644 --- a/src/drivers/mct/main/seq_io_mod.F90 +++ b/src/drivers/mct/main/seq_io_mod.F90 @@ -37,7 +37,8 @@ module seq_io_mod use shr_kind_mod, only: r4 => shr_kind_r4, r8 => shr_kind_r8, in => shr_kind_in use shr_kind_mod, only: cl => shr_kind_cl, cs => shr_kind_cs use shr_sys_mod ! 
system calls - use seq_comm_mct + use seq_comm_mct, only: logunit, CPLID, seq_comm_setptrs + use seq_comm_mct, only: seq_comm_namelen, seq_comm_name use seq_flds_mod, only : seq_flds_lookup use mct_mod ! mct wrappers use pio @@ -1545,7 +1546,7 @@ subroutine seq_io_read_av(filename,gsmap,AV,dname,pre) implicit none character(len=*),intent(in) :: filename ! file type(mct_gsMap), intent(in) :: gsmap - type(mct_aVect) ,intent(inout):: AV ! data to be written + type(mct_aVect) ,intent(inout):: AV ! data to be read character(len=*),intent(in) :: dname ! name of data character(len=*),intent(in),optional :: pre ! prefix name @@ -1671,7 +1672,7 @@ subroutine seq_io_read_avs(filename,gsmap,AVS,dname,pre) implicit none character(len=*),intent(in) :: filename ! file type(mct_gsMap), intent(in) :: gsmap - type(mct_aVect) ,intent(inout):: AVS(:) ! data to be written + type(mct_aVect) ,intent(inout):: AVS(:) ! data to be read character(len=*),intent(in) :: dname ! name of data character(len=*),intent(in),optional :: pre ! prefix name diff --git a/src/drivers/mct/main/seq_rest_mod.F90 b/src/drivers/mct/main/seq_rest_mod.F90 index fa26e06d028b..6994d954b812 100644 --- a/src/drivers/mct/main/seq_rest_mod.F90 +++ b/src/drivers/mct/main/seq_rest_mod.F90 @@ -40,16 +40,17 @@ module seq_rest_mod seq_comm_iamin, CPLID, GLOID, logunit, loglevel ! "infodata" gathers various control flags into one datatype - use seq_infodata_mod + use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getData ! clock & alarm routines - use seq_timemgr_mod + use seq_timemgr_mod, only: seq_timemgr_type, seq_timemgr_EClockGetData ! diagnostic routines use seq_diag_mct, only: budg_datag ! lower level io routines - use seq_io_mod + use seq_io_mod, only: seq_io_read, seq_io_write, seq_io_enddef + use seq_io_mod, only: seq_io_wopen, seq_io_close ! prep modules - coupler communication between different components use prep_ocn_mod, only: prep_ocn_get_x2oacc_ox @@ -412,7 +413,6 @@ subroutine seq_rest_write(EClock_d, seq_SyncClock, infodata, & if (fk == 1) then whead = .true. wdata = .false. -!! call seq_io_redef(rest_file) elseif (fk == 2) then whead = .false. wdata = .true. diff --git a/src/drivers/mct/shr/seq_comm_mct.F90 b/src/drivers/mct/shr/seq_comm_mct.F90 index 5062704cded6..6dc12259f16f 100644 --- a/src/drivers/mct/shr/seq_comm_mct.F90 +++ b/src/drivers/mct/shr/seq_comm_mct.F90 @@ -96,7 +96,6 @@ module seq_comm_mct !!! internal communications, and one is needed for the global space. !!! All instances of a component type also share a separate communicator !!! All instances of a component type share a communicator with the coupler - !!! Note that ESP models do not need coupler communicators integer, parameter, public :: num_inst_phys = num_inst_atm + num_inst_lnd + & num_inst_ocn + num_inst_ice + & @@ -105,7 +104,7 @@ module seq_comm_mct integer, parameter, public :: num_cpl_phys = num_inst_atm + num_inst_lnd + & num_inst_ocn + num_inst_ice + & num_inst_glc + num_inst_rof + & - num_inst_wav + num_inst_wav + num_inst_esp integer, parameter :: ncomps = (1 + ncouplers + ncomptypes + nphysmod + num_inst_phys + num_cpl_phys) integer, public :: GLOID @@ -127,7 +126,7 @@ module seq_comm_mct integer, public :: CPLALLGLCID integer, public :: CPLALLROFID integer, public :: CPLALLWAVID - integer, public, parameter :: CPLALLESPID = -1 + integer, public :: CPLALLESPID integer, public :: ATMID(num_inst_atm) integer, public :: LNDID(num_inst_lnd) @@ -166,7 +165,9 @@ module seq_comm_mct integer :: gloroot ! 
the global task number of each comps root on all pes integer :: pethreads ! max number of threads on my task integer :: cplpe ! a common task in mpicom from the cpl group for join mpicoms + ! cplpe is used to broadcast information from the coupler to the component integer :: cmppe ! a common task in mpicom from the component group for join mpicoms + ! cmppe is used to broadcast information from the component to the coupler logical :: set ! has this datatype been set integer, pointer :: petlist(:) ! esmf pet list logical :: petlist_allocated ! whether the petlist pointer variable was allocated @@ -394,7 +395,8 @@ subroutine seq_comm_init(Comm_in, nmlfile) num_inst_min = min(num_inst_min, num_inst_glc) num_inst_min = min(num_inst_min, num_inst_wav) num_inst_min = min(num_inst_min, num_inst_rof) - num_inst_min = min(num_inst_min, num_inst_esp) +! ESP is currently limited to one instance, should not affect other comps +! num_inst_min = min(num_inst_min, num_inst_esp) num_inst_max = num_inst_atm num_inst_max = max(num_inst_max, num_inst_lnd) num_inst_max = max(num_inst_max, num_inst_ocn) @@ -412,7 +414,10 @@ subroutine seq_comm_init(Comm_in, nmlfile) if (num_inst_glc /= num_inst_min .and. num_inst_glc /= num_inst_max) error_state = .true. if (num_inst_wav /= num_inst_min .and. num_inst_wav /= num_inst_max) error_state = .true. if (num_inst_rof /= num_inst_min .and. num_inst_rof /= num_inst_max) error_state = .true. - if (num_inst_esp /= num_inst_min .and. num_inst_esp /= num_inst_max) error_state = .true. + if (num_inst_esp /= 1) then + write(logunit,*) trim(subname),' ERROR: ESP restricted to one instance' + error_state = .true. + end if if (error_state) then write(logunit,*) trim(subname),' ERROR: num_inst inconsistent' @@ -459,6 +464,8 @@ subroutine seq_comm_init(Comm_in, nmlfile) CPLALLROFID = count count = count + 1 CPLALLWAVID = count + count = count + 1 + CPLALLESPID = count do n = 1, num_inst_atm count = count + 1 @@ -841,6 +848,7 @@ subroutine seq_comm_init(Comm_in, nmlfile) call seq_comm_setcomm(ESPID(n), pelist, esp_nthreads, 'ESP', n, num_inst_esp) end do call seq_comm_jcommarr(ESPID,ALLESPID,'ALLESPID',1,1) + call seq_comm_joincomm(CPLID,ALLESPID,CPLALLESPID,'CPLALLESPID',1,1) !! Count the total number of threads @@ -1156,36 +1164,6 @@ subroutine seq_comm_joincomm(ID1,ID2,ID,iname,inst,tinst) seq_comms(ID)%iamroot = .false. endif -! needs to be excluded until mpi_group_size is added to serial mpi in mct -#if (1 == 0) - if (loglevel > 3) then - ! some debug code to prove the join is working ok - ! when joining mpicomms, the local rank may be quite different - ! 
from either the global or local ranks of the joining comms - call mpi_group_size(seq_comms(ID1)%mpigrp,nsize,ierr) - allocate(pe_t1(nsize),pe_t2(nsize)) - do n = 1,nsize - pe_t1(n) = n-1 - pe_t2(n) = -1 - enddo - call mpi_group_translate_ranks(seq_comms(ID1)%mpigrp, nsize, pe_t1, mpigrp, pe_t2, ierr) - write(logunit,*) 'ID1 ranks ',pe_t1 - write(logunit,*) 'ID1-JOIN ranks ',pe_t2 - deallocate(pe_t1,pe_t2) - - call mpi_group_size(seq_comms(ID2)%mpigrp,nsize,ierr) - allocate(pe_t1(nsize),pe_t2(nsize)) - do n = 1,nsize - pe_t1(n) = n-1 - pe_t2(n) = -1 - enddo - call mpi_group_translate_ranks(seq_comms(ID2)%mpigrp, nsize, pe_t1, mpigrp, pe_t2, ierr) - write(logunit,*) 'ID2 ranks ',pe_t1 - write(logunit,*) 'ID2-JOIN ranks ',pe_t2 - deallocate(pe_t1,pe_t2) - endif -#endif - allocate(pe_t1(1),pe_t2(1)) pe_t1(1) = 0 call mpi_group_translate_ranks(seq_comms(ID1)%mpigrp, 1, pe_t1, mpigrp, pe_t2, ierr) diff --git a/src/drivers/mct/shr/seq_infodata_mod.F90 b/src/drivers/mct/shr/seq_infodata_mod.F90 index 7341e4c2af2f..cf95d024c470 100644 --- a/src/drivers/mct/shr/seq_infodata_mod.F90 +++ b/src/drivers/mct/shr/seq_infodata_mod.F90 @@ -46,6 +46,7 @@ MODULE seq_infodata_mod ! !PUBLIC MEMBER FUNCTIONS public :: seq_infodata_Init ! Initialize + public :: seq_infodata_Init2 ! Init after clocks initialized public :: seq_infodata_GetData ! Get values from object public :: seq_infodata_PutData ! Change values public :: seq_infodata_Print ! print current info @@ -66,14 +67,6 @@ MODULE seq_infodata_mod ! Type to hold pause/resume signaling information type seq_pause_resume_type private - logical :: atm_pause = .false. ! atm write pause restart file - logical :: lnd_pause = .false. ! lnd write pause restart file - logical :: ice_pause = .false. ! ice write pause restart file - logical :: ocn_pause = .false. ! ocn write pause restart file - logical :: glc_pause = .false. ! glc write pause restart file - logical :: rof_pause = .false. ! rof write pause restart file - logical :: wav_pause = .false. ! wav write pause restart file - logical :: cpl_pause = .false. ! cpl write pause restart file character(SHR_KIND_CL) :: atm_resume(num_inst_atm) = ' ' ! atm resume file character(SHR_KIND_CL) :: lnd_resume(num_inst_lnd) = ' ' ! lnd resume file character(SHR_KIND_CL) :: ice_resume(num_inst_ice) = ' ' ! ice resume file @@ -293,12 +286,12 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid) ! !USES: - use shr_file_mod, only : shr_file_getUnit, shr_file_freeUnit - use shr_string_mod, only : shr_string_toUpper, shr_string_listAppend - use shr_mpi_mod, only : shr_mpi_bcast - use seq_io_read_mod - use pio, only : file_desc_t - implicit none + use shr_file_mod, only : shr_file_getUnit, shr_file_freeUnit + use shr_string_mod, only : shr_string_toUpper, shr_string_listAppend + use shr_mpi_mod, only : shr_mpi_bcast + use seq_timemgr_mod, only : seq_timemgr_pause_active + use seq_io_read_mod, only : seq_io_read + use pio, only : file_desc_t ! !INPUT/OUTPUT PARAMETERS: @@ -858,6 +851,43 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid) END SUBROUTINE seq_infodata_Init +!=============================================================================== +!BOP =========================================================================== +! +! !IROUTINE: seq_infodata_Init2 -- initialize infodata structures +! +! !DESCRIPTION: +! +! Initialize infodata items that depend on the time manager setup +! +! 
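! Usage sketch (drawn from cesm_pre_init2 as modified by this patch; GLOID is
! the global seq_comm ID used there):
!
!   ! called once the driver clocks, and hence the pause settings, exist
!   call seq_infodata_init2(infodata, GLOID)
!
! The routine allocates infodata%pause_resume only when
! seq_timemgr_pause_active() reports that a pause/resume cycle is configured,
! then broadcasts infodata over the communicator obtained from ID.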
!INTERFACE: ------------------------------------------------------------------ + +SUBROUTINE seq_infodata_Init2(infodata, ID) + +! !USES: + + use seq_timemgr_mod, only : seq_timemgr_pause_active + +! !INPUT/OUTPUT PARAMETERS: + + type(seq_infodata_type), intent(INOUT) :: infodata ! infodata object + integer(SHR_KIND_IN), intent(IN) :: ID ! seq_comm ID +!EOP + + !----- local ----- + integer :: mpicom ! MPI communicator + + call seq_comm_setptrs(ID, mpicom=mpicom) + !---------------------------------------------------------- + !| If pause/resume is active, initialize the resume data + !---------------------------------------------------------- + if (seq_timemgr_pause_active() .and. (.not. associated(infodata%pause_resume))) then + allocate(infodata%pause_resume) + end if + call seq_infodata_bcast(infodata, mpicom) + +END SUBROUTINE seq_infodata_Init2 + !=============================================================================== !=============================================================================== ! !IROUTINE: seq_infodata_GetData_explicit -- Get values from infodata object @@ -896,16 +926,15 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ cpl_cdf64, orb_iyear, orb_iyear_align, orb_mode, orb_mvelp, & orb_eccen, orb_obliqr, orb_lambm0, orb_mvelpp, wv_sat_scheme, & wv_sat_transition_start, wv_sat_use_tables, wv_sat_table_spacing, & - tfreeze_option, & + tfreeze_option, & glc_phase, rof_phase, atm_phase, lnd_phase, ocn_phase, ice_phase, & wav_phase, esp_phase, wav_nx, wav_ny, atm_nx, atm_ny, & lnd_nx, lnd_ny, rof_nx, rof_ny, ice_nx, ice_ny, ocn_nx, ocn_ny, & glc_nx, glc_ny, eps_frac, eps_amask, & eps_agrid, eps_aarea, eps_omask, eps_ogrid, eps_oarea, & reprosum_use_ddpdd, reprosum_diffmax, reprosum_recompute, & - atm_pause, lnd_pause, ocn_pause, ice_pause, glc_pause, rof_pause, & - wav_pause, cpl_pause, atm_resume, lnd_resume, ocn_resume, & - ice_resume, glc_resume, rof_resume, wav_resume, cpl_resume, & + atm_resume, lnd_resume, ocn_resume, ice_resume, & + glc_resume, rof_resume, wav_resume, cpl_resume, & mct_usealltoall, mct_usevector, max_cplstep_time) @@ -1068,14 +1097,6 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ logical, optional, intent(OUT) :: glcrun_alarm ! glc run alarm logical, optional, intent(OUT) :: glc_g2lupdate ! update glc2lnd fields in lnd model real(shr_kind_r8), optional, intent(out) :: max_cplstep_time - logical, optional, intent(OUT) :: atm_pause ! atm write pause restart file - logical, optional, intent(OUT) :: lnd_pause ! lnd write pause restart file - logical, optional, intent(OUT) :: ice_pause ! ice write pause restart file - logical, optional, intent(OUT) :: ocn_pause ! ocn write pause restart file - logical, optional, intent(OUT) :: glc_pause ! glc write pause restart file - logical, optional, intent(OUT) :: rof_pause ! rof write pause restart file - logical, optional, intent(OUT) :: wav_pause ! wav write pause restart file - logical, optional, intent(OUT) :: cpl_pause ! cpl write pause restart file character(SHR_KIND_CL), optional, intent(OUT) :: atm_resume(:) ! atm read resume state character(SHR_KIND_CL), optional, intent(OUT) :: lnd_resume(:) ! lnd read resume state character(SHR_KIND_CL), optional, intent(OUT) :: ice_resume(:) ! 
ice read resume state @@ -1257,62 +1278,6 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ if ( present(atm_aero) ) atm_aero = infodata%atm_aero if ( present(glcrun_alarm) ) glcrun_alarm = infodata%glcrun_alarm if ( present(glc_g2lupdate) ) glc_g2lupdate = infodata%glc_g2lupdate - if ( present(atm_pause) ) then - if (associated(infodata%pause_resume)) then - atm_pause = infodata%pause_resume%atm_pause - else - atm_pause = .false. - end if - end if - if ( present(lnd_pause) ) then - if (associated(infodata%pause_resume)) then - lnd_pause = infodata%pause_resume%lnd_pause - else - lnd_pause = .false. - end if - end if - if ( present(ice_pause) ) then - if (associated(infodata%pause_resume)) then - ice_pause = infodata%pause_resume%ice_pause - else - ice_pause = .false. - end if - end if - if ( present(ocn_pause) ) then - if (associated(infodata%pause_resume)) then - ocn_pause = infodata%pause_resume%ocn_pause - else - ocn_pause = .false. - end if - end if - if ( present(glc_pause) ) then - if (associated(infodata%pause_resume)) then - glc_pause = infodata%pause_resume%glc_pause - else - glc_pause = .false. - end if - end if - if ( present(rof_pause) ) then - if (associated(infodata%pause_resume)) then - rof_pause = infodata%pause_resume%rof_pause - else - rof_pause = .false. - end if - end if - if ( present(wav_pause) ) then - if (associated(infodata%pause_resume)) then - wav_pause = infodata%pause_resume%wav_pause - else - wav_pause = .false. - end if - end if - if ( present(cpl_pause) ) then - if (associated(infodata%pause_resume)) then - cpl_pause = infodata%pause_resume%cpl_pause - else - cpl_pause = .false. - end if - end if if ( present(atm_resume) ) then if (associated(infodata%pause_resume)) then atm_resume(:) = infodata%pause_resume%atm_resume(:) @@ -1385,7 +1350,7 @@ END SUBROUTINE seq_infodata_GetData_explicit SUBROUTINE seq_infodata_GetData_bytype( component_firstletter, infodata, & comp_present, comp_prognostic, comp_gnam, histavg_comp, & - comp_phase, comp_nx, comp_ny, comp_pause, comp_resume) + comp_phase, comp_nx, comp_ny, comp_resume) implicit none @@ -1401,7 +1366,6 @@ SUBROUTINE seq_infodata_GetData_bytype( component_firstletter, infodata, & integer(SHR_KIND_IN), optional, intent(OUT) :: comp_ny ! 
nx,ny 2d grid size global integer(SHR_KIND_IN), optional, intent(OUT) :: comp_phase logical, optional, intent(OUT) :: histavg_comp - logical, optional, intent(OUT) :: comp_pause character(SHR_KIND_CL), optional, intent(OUT) :: comp_resume(:) !----- local ----- @@ -1413,37 +1377,37 @@ SUBROUTINE seq_infodata_GetData_bytype( component_firstletter, infodata, & call seq_infodata_GetData(infodata, atm_present=comp_present, & atm_prognostic=comp_prognostic, atm_gnam=comp_gnam, & atm_phase=comp_phase, atm_nx=comp_nx, atm_ny=comp_ny, & - histavg_atm=histavg_comp, atm_pause=comp_pause, atm_resume=comp_resume) + histavg_atm=histavg_comp, atm_resume=comp_resume) else if (component_firstletter == 'l') then call seq_infodata_GetData(infodata, lnd_present=comp_present, & lnd_prognostic=comp_prognostic, lnd_gnam=comp_gnam, & lnd_phase=comp_phase, lnd_nx=comp_nx, lnd_ny=comp_ny, & - histavg_lnd=histavg_comp, lnd_pause=comp_pause, lnd_resume=comp_resume) + histavg_lnd=histavg_comp, lnd_resume=comp_resume) else if (component_firstletter == 'i') then call seq_infodata_GetData(infodata, ice_present=comp_present, & ice_prognostic=comp_prognostic, ice_gnam=comp_gnam, & ice_phase=comp_phase, ice_nx=comp_nx, ice_ny=comp_ny, & - histavg_ice=histavg_comp, ice_pause=comp_pause, ice_resume=comp_resume) + histavg_ice=histavg_comp, ice_resume=comp_resume) else if (component_firstletter == 'o') then call seq_infodata_GetData(infodata, ocn_present=comp_present, & ocn_prognostic=comp_prognostic, ocn_gnam=comp_gnam, & ocn_phase=comp_phase, ocn_nx=comp_nx, ocn_ny=comp_ny, & - histavg_ocn=histavg_comp, ocn_pause=comp_pause, ocn_resume=comp_resume) + histavg_ocn=histavg_comp, ocn_resume=comp_resume) else if (component_firstletter == 'r') then call seq_infodata_GetData(infodata, rof_present=comp_present, & rof_prognostic=comp_prognostic, rof_gnam=comp_gnam, & rof_phase=comp_phase, rof_nx=comp_nx, rof_ny=comp_ny, & - histavg_rof=histavg_comp, rof_pause=comp_pause, rof_resume=comp_resume) + histavg_rof=histavg_comp, rof_resume=comp_resume) else if (component_firstletter == 'g') then call seq_infodata_GetData(infodata, glc_present=comp_present, & glc_prognostic=comp_prognostic, glc_gnam=comp_gnam, & glc_phase=comp_phase, glc_nx=comp_nx, glc_ny=comp_ny, & - histavg_glc=histavg_comp, glc_pause=comp_pause, glc_resume=comp_resume) + histavg_glc=histavg_comp, glc_resume=comp_resume) else if (component_firstletter == 'w') then call seq_infodata_GetData(infodata, wav_present=comp_present, & wav_prognostic=comp_prognostic, wav_gnam=comp_gnam, & wav_phase=comp_phase, wav_nx=comp_nx, wav_ny=comp_ny, & - histavg_wav=histavg_comp, wav_pause=comp_pause, wav_resume=comp_resume) + histavg_wav=histavg_comp, wav_resume=comp_resume) else if (component_firstletter == 'e') then if (present(comp_gnam)) then comp_gnam = '' @@ -1469,12 +1433,6 @@ SUBROUTINE seq_infodata_GetData_bytype( component_firstletter, infodata, & write(logunit,*) trim(subname),' Note: ESP type has no histavg property' end if end if - if (present(comp_pause)) then - comp_pause = .false. - if ((loglevel > 1) .and. seq_comm_iamroot(1)) then - write(logunit,*) trim(subname),' Note: ESP type has no pause property' - end if - end if if (present(comp_resume)) then comp_resume = ' ' if ((loglevel > 1) .and. 
seq_comm_iamroot(1)) then @@ -1535,9 +1493,8 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ glc_nx, glc_ny, eps_frac, eps_amask, & eps_agrid, eps_aarea, eps_omask, eps_ogrid, eps_oarea, & reprosum_use_ddpdd, reprosum_diffmax, reprosum_recompute, & - atm_pause, lnd_pause, ocn_pause, ice_pause, glc_pause, rof_pause, & - wav_pause, cpl_pause, atm_resume, lnd_resume, ocn_resume, & - ice_resume, glc_resume, rof_resume, wav_resume, cpl_resume, & + atm_resume, lnd_resume, ocn_resume, ice_resume, & + glc_resume, rof_resume, wav_resume, cpl_resume, & mct_usealltoall, mct_usevector ) @@ -1698,14 +1655,6 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ logical, optional, intent(IN) :: atm_aero ! atm aerosols logical, optional, intent(IN) :: glcrun_alarm ! glc run alarm logical, optional, intent(IN) :: glc_g2lupdate ! update glc2lnd fields in lnd model - logical, optional, intent(IN) :: atm_pause ! atm pause - logical, optional, intent(IN) :: lnd_pause ! lnd pause - logical, optional, intent(IN) :: ice_pause ! ice pause - logical, optional, intent(IN) :: ocn_pause ! ocn pause - logical, optional, intent(IN) :: glc_pause ! glc pause - logical, optional, intent(IN) :: rof_pause ! rof pause - logical, optional, intent(IN) :: wav_pause ! wav pause - logical, optional, intent(IN) :: cpl_pause ! cpl pause character(SHR_KIND_CL), optional, intent(IN) :: atm_resume(:) ! atm resume character(SHR_KIND_CL), optional, intent(IN) :: lnd_resume(:) ! lnd resume character(SHR_KIND_CL), optional, intent(IN) :: ice_resume(:) ! ice resume @@ -1875,70 +1824,6 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ if ( present(atm_aero) ) infodata%atm_aero = atm_aero if ( present(glcrun_alarm) ) infodata%glcrun_alarm = glcrun_alarm if ( present(glc_g2lupdate) ) infodata%glc_g2lupdate = glc_g2lupdate - if ( present(atm_pause) ) then - if (associated(infodata%pause_resume)) then - infodata%pause_resume%atm_pause = atm_pause - else if (atm_pause) then - allocate(infodata%pause_resume) - infodata%pause_resume%atm_pause = atm_pause - end if - end if - if ( present(lnd_pause) ) then - if (associated(infodata%pause_resume)) then - infodata%pause_resume%lnd_pause = lnd_pause - else if (lnd_pause) then - allocate(infodata%pause_resume) - infodata%pause_resume%lnd_pause = lnd_pause - end if - end if - if ( present(ice_pause) ) then - if (associated(infodata%pause_resume)) then - infodata%pause_resume%ice_pause = ice_pause - else if (ice_pause) then - allocate(infodata%pause_resume) - infodata%pause_resume%ice_pause = ice_pause - end if - end if - if ( present(ocn_pause) ) then - if (associated(infodata%pause_resume)) then - infodata%pause_resume%ocn_pause = ocn_pause - else if (ocn_pause) then - allocate(infodata%pause_resume) - infodata%pause_resume%ocn_pause = ocn_pause - end if - end if - if ( present(glc_pause) ) then - if (associated(infodata%pause_resume)) then - infodata%pause_resume%glc_pause = glc_pause - else if (glc_pause) then - allocate(infodata%pause_resume) - infodata%pause_resume%glc_pause = glc_pause - end if - end if - if ( present(rof_pause) ) then - if (associated(infodata%pause_resume)) then - infodata%pause_resume%rof_pause = rof_pause - else if (rof_pause) then - allocate(infodata%pause_resume) - infodata%pause_resume%rof_pause = rof_pause - end if - end if - if ( present(wav_pause) ) then - if (associated(infodata%pause_resume)) then - infodata%pause_resume%wav_pause = wav_pause - else if (wav_pause) then - 
allocate(infodata%pause_resume) - infodata%pause_resume%wav_pause = wav_pause - end if - end if - if ( present(cpl_pause) ) then - if (associated(infodata%pause_resume)) then - infodata%pause_resume%cpl_pause = cpl_pause - else if (cpl_pause) then - allocate(infodata%pause_resume) - infodata%pause_resume%cpl_pause = cpl_pause - end if - end if if ( present(atm_resume) ) then if (associated(infodata%pause_resume)) then infodata%pause_resume%atm_resume(:) = atm_resume(:) @@ -2018,7 +1903,7 @@ END SUBROUTINE seq_infodata_PutData_explicit SUBROUTINE seq_infodata_PutData_bytype( component_firstletter, infodata, & comp_present, comp_prognostic, comp_gnam, & - histavg_comp, comp_phase, comp_nx, comp_ny, comp_pause, comp_resume) + histavg_comp, comp_phase, comp_nx, comp_ny, comp_resume) implicit none @@ -2033,7 +1918,6 @@ SUBROUTINE seq_infodata_PutData_bytype( component_firstletter, infodata, & integer(SHR_KIND_IN), optional, intent(IN) :: comp_ny ! nx,ny 2d grid size global integer(SHR_KIND_IN), optional, intent(IN) :: comp_phase logical, optional, intent(IN) :: histavg_comp - logical, optional, intent(IN) :: comp_pause character(SHR_KIND_CL), optional, intent(IN) :: comp_resume(:) !EOP @@ -2047,37 +1931,37 @@ SUBROUTINE seq_infodata_PutData_bytype( component_firstletter, infodata, & call seq_infodata_PutData(infodata, atm_present=comp_present, & atm_prognostic=comp_prognostic, atm_gnam=comp_gnam, & atm_phase=comp_phase, atm_nx=comp_nx, atm_ny=comp_ny, & - histavg_atm=histavg_comp, atm_pause=comp_pause, atm_resume=comp_resume) + histavg_atm=histavg_comp, atm_resume=comp_resume) else if (component_firstletter == 'l') then call seq_infodata_PutData(infodata, lnd_present=comp_present, & lnd_prognostic=comp_prognostic, lnd_gnam=comp_gnam, & lnd_phase=comp_phase, lnd_nx=comp_nx, lnd_ny=comp_ny, & - histavg_lnd=histavg_comp, lnd_pause=comp_pause, lnd_resume=comp_resume) + histavg_lnd=histavg_comp, lnd_resume=comp_resume) else if (component_firstletter == 'i') then call seq_infodata_PutData(infodata, ice_present=comp_present, & ice_prognostic=comp_prognostic, ice_gnam=comp_gnam, & ice_phase=comp_phase, ice_nx=comp_nx, ice_ny=comp_ny, & - histavg_ice=histavg_comp, ice_pause=comp_pause, ice_resume=comp_resume) + histavg_ice=histavg_comp, ice_resume=comp_resume) else if (component_firstletter == 'o') then call seq_infodata_PutData(infodata, ocn_present=comp_present, & ocn_prognostic=comp_prognostic, ocn_gnam=comp_gnam, & ocn_phase=comp_phase, ocn_nx=comp_nx, ocn_ny=comp_ny, & - histavg_ocn=histavg_comp, ocn_pause=comp_pause, ocn_resume=comp_resume) + histavg_ocn=histavg_comp, ocn_resume=comp_resume) else if (component_firstletter == 'r') then call seq_infodata_PutData(infodata, rof_present=comp_present, & rof_prognostic=comp_prognostic, rof_gnam=comp_gnam, & rof_phase=comp_phase, rof_nx=comp_nx, rof_ny=comp_ny, & - histavg_rof=histavg_comp, rof_pause=comp_pause, rof_resume=comp_resume) + histavg_rof=histavg_comp, rof_resume=comp_resume) else if (component_firstletter == 'g') then call seq_infodata_PutData(infodata, glc_present=comp_present, & glc_prognostic=comp_prognostic, glc_gnam=comp_gnam, & glc_phase=comp_phase, glc_nx=comp_nx, glc_ny=comp_ny, & - histavg_glc=histavg_comp, glc_pause=comp_pause, glc_resume=comp_resume) + histavg_glc=histavg_comp, glc_resume=comp_resume) else if (component_firstletter == 'w') then call seq_infodata_PutData(infodata, wav_present=comp_present, & wav_prognostic=comp_prognostic, wav_gnam=comp_gnam, & wav_phase=comp_phase, wav_nx=comp_nx, wav_ny=comp_ny, & - 
histavg_wav=histavg_comp, wav_pause=comp_pause, wav_resume=comp_resume) + histavg_wav=histavg_comp, wav_resume=comp_resume) else if (component_firstletter == 'e') then if ((loglevel > 1) .and. seq_comm_iamroot(1)) then if (present(comp_gnam)) then @@ -2092,9 +1976,6 @@ SUBROUTINE seq_infodata_PutData_bytype( component_firstletter, infodata, & if (present(histavg_comp)) then write(logunit,*) trim(subname),' Note: ESP type has no histavg property' end if - if (present(comp_pause)) then - write(logunit,*) trim(subname),' Note: ESP type has no pause property' - end if if (present(comp_resume)) then write(logunit,*) trim(subname),' Note: ESP type has no resume property' end if @@ -2111,6 +1992,74 @@ END SUBROUTINE seq_infodata_PutData_bytype #endif ! ^ ifndef CPRPGI +!=============================================================================== +!BOP =========================================================================== +! +! !IROUTINE: seq_infodata_pauseresume_bcast -- Broadcast pause/resume data from root pe +! +! !DESCRIPTION: +! +! Broadcast the pause_resume data from an infodata across pes +! +! !INTERFACE: ------------------------------------------------------------------ + +subroutine seq_infodata_pauseresume_bcast(infodata, mpicom, pebcast) + + use shr_mpi_mod, only : shr_mpi_bcast + +! !INPUT/OUTPUT PARAMETERS: + + type(seq_infodata_type), intent(INOUT) :: infodata ! assume valid on root pe + integer(SHR_KIND_IN), intent(IN) :: mpicom ! MPI Communicator + integer(SHR_KIND_IN), optional, intent(IN) :: pebcast ! pe sending + +!EOP + + !----- local ----- + integer :: ind + integer(SHR_KIND_IN) :: pebcast_local + character(len=*), parameter :: subname = '(seq_infodata_pauseresume_bcast) ' + + if (present(pebcast)) then + pebcast_local = pebcast + else + pebcast_local = 0 + end if + + if (associated(infodata%pause_resume)) then + do ind = 1, num_inst_atm + call shr_mpi_bcast(infodata%pause_resume%atm_resume(ind), mpicom, & + pebcast=pebcast_local) + end do + do ind = 1, num_inst_lnd + call shr_mpi_bcast(infodata%pause_resume%lnd_resume(ind), mpicom, & + pebcast=pebcast_local) + end do + do ind = 1, num_inst_ice + call shr_mpi_bcast(infodata%pause_resume%ice_resume(ind), mpicom, & + pebcast=pebcast_local) + end do + do ind = 1, num_inst_ocn + call shr_mpi_bcast(infodata%pause_resume%ocn_resume(ind), mpicom, & + pebcast=pebcast_local) + end do + do ind = 1, num_inst_glc + call shr_mpi_bcast(infodata%pause_resume%glc_resume(ind), mpicom, & + pebcast=pebcast_local) + end do + do ind = 1, num_inst_rof + call shr_mpi_bcast(infodata%pause_resume%rof_resume(ind), mpicom, & + pebcast=pebcast_local) + end do + do ind = 1, num_inst_wav + call shr_mpi_bcast(infodata%pause_resume%wav_resume(ind), mpicom, & + pebcast=pebcast_local) + end do + call shr_mpi_bcast(infodata%pause_resume%cpl_resume, mpicom, & + pebcast=pebcast_local) + end if +end subroutine seq_infodata_pauseresume_bcast + !=============================================================================== !BOP =========================================================================== ! 
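! Call pattern for the new helper (a sketch; both call sites appear in the
! hunks that follow):
!
!   ! full-communicator broadcast from rank 0, used at initialization:
!   call seq_infodata_pauseresume_bcast(infodata, mpicom)
!
!   ! directed broadcast from the ESP or coupler root during a run:
!   call seq_infodata_pauseresume_bcast(infodata, mpicom, pebcast=cmppe)
!   call seq_infodata_pauseresume_bcast(infodata, mpicom, pebcast=cplpe)
!
! Only the *_resume file names are broadcast; the per-component *_pause
! logicals have been removed from seq_pause_resume_type in favor of the
! time manager queries.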
@@ -2293,38 +2242,8 @@ subroutine seq_infodata_bcast(infodata,mpicom) call shr_mpi_bcast(infodata%atm_aero, mpicom) call shr_mpi_bcast(infodata%glcrun_alarm, mpicom) call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom) - if (associated(infodata%pause_resume)) then - call shr_mpi_bcast(infodata%pause_resume%atm_pause, mpicom) - call shr_mpi_bcast(infodata%pause_resume%lnd_pause, mpicom) - call shr_mpi_bcast(infodata%pause_resume%ice_pause, mpicom) - call shr_mpi_bcast(infodata%pause_resume%ocn_pause, mpicom) - call shr_mpi_bcast(infodata%pause_resume%glc_pause, mpicom) - call shr_mpi_bcast(infodata%pause_resume%rof_pause, mpicom) - call shr_mpi_bcast(infodata%pause_resume%wav_pause, mpicom) - call shr_mpi_bcast(infodata%pause_resume%cpl_pause, mpicom) - do ind = 1, num_inst_atm - call shr_mpi_bcast(infodata%pause_resume%atm_resume(ind), mpicom) - end do - do ind = 1, num_inst_lnd - call shr_mpi_bcast(infodata%pause_resume%lnd_resume(ind), mpicom) - end do - do ind = 1, num_inst_ice - call shr_mpi_bcast(infodata%pause_resume%ice_resume(ind), mpicom) - end do - do ind = 1, num_inst_ocn - call shr_mpi_bcast(infodata%pause_resume%ocn_resume(ind), mpicom) - end do - do ind = 1, num_inst_glc - call shr_mpi_bcast(infodata%pause_resume%glc_resume(ind), mpicom) - end do - do ind = 1, num_inst_rof - call shr_mpi_bcast(infodata%pause_resume%rof_resume(ind), mpicom) - end do - do ind = 1, num_inst_wav - call shr_mpi_bcast(infodata%pause_resume%wav_resume(ind), mpicom) - end do - call shr_mpi_bcast(infodata%pause_resume%cpl_resume, mpicom) - end if + + call seq_infodata_pauseresume_bcast(infodata, mpicom) end subroutine seq_infodata_bcast @@ -2355,7 +2274,8 @@ subroutine seq_infodata_Exchange(infodata,ID,type) !----- local ----- integer(SHR_KIND_IN) :: mpicom ! mpicom - integer(SHR_KIND_IN) :: pebcast ! pe sending + integer(SHR_KIND_IN) :: cmppe ! component 'root' for broadcast + integer(SHR_KIND_IN) :: cplpe ! coupler 'root' for broadcast logical :: atm2cpli,atm2cplr logical :: lnd2cpli,lnd2cplr logical :: rof2cpli,rof2cplr @@ -2363,6 +2283,7 @@ subroutine seq_infodata_Exchange(infodata,ID,type) logical :: ice2cpli,ice2cplr logical :: glc2cpli,glc2cplr logical :: wav2cpli,wav2cplr + logical :: esp2cpli,esp2cplr logical :: cpl2i,cpl2r logical :: logset logical :: deads ! local variable to hold info temporarily @@ -2372,8 +2293,7 @@ subroutine seq_infodata_Exchange(infodata,ID,type) ! Notes: !------------------------------------------------------------------------------- - ! assume the comp pe is going to broadcast, change to cplpe below if appropriate - call seq_comm_setptrs(ID,mpicom=mpicom,cmppe=pebcast) + call seq_comm_setptrs(ID, mpicom=mpicom, cmppe=cmppe, cplpe=cplpe) logset = .false. @@ -2391,6 +2311,8 @@ subroutine seq_infodata_Exchange(infodata,ID,type) glc2cplr = .false. wav2cpli = .false. wav2cplr = .false. + esp2cpli = .false. + esp2cplr = .false. cpl2i = .false. cpl2r = .false. @@ -2466,16 +2388,26 @@ subroutine seq_infodata_Exchange(infodata,ID,type) logset = .true. endif + if (trim(type) == 'esp2cpl_init') then + esp2cpli = .true. + esp2cplr = .true. + logset = .true. + endif + if (trim(type) == 'esp2cpl_run') then + esp2cplr = .true. + logset = .true. + endif + if (trim(type) == 'cpl2atm_init' .or. & trim(type) == 'cpl2lnd_init' .or. & trim(type) == 'cpl2rof_init' .or. & trim(type) == 'cpl2ocn_init' .or. & trim(type) == 'cpl2glc_init' .or. & trim(type) == 'cpl2wav_init' .or. & + trim(type) == 'cpl2esp_init' .or. & trim(type) == 'cpl2ice_init') then cpl2i = .true. cpl2r = .true. 
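! Broadcast-root convention used below (a summary of the substitutions in
! this routine, not new code):
!   comp -> cpl data: call shr_mpi_bcast(<field>, mpicom, pebcast=cmppe)
!   cpl -> comp data: call shr_mpi_bcast(<field>, mpicom, pebcast=cplpe)
! Both cmppe and cplpe are now fetched once at entry, so the single pebcast
! variable, and the re-call of seq_comm_setptrs inside the cpl2* branches,
! are no longer needed.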
- call seq_comm_setptrs(ID,cplpe=pebcast) logset = .true. endif @@ -2487,7 +2419,6 @@ subroutine seq_infodata_Exchange(infodata,ID,type) trim(type) == 'cpl2wav_run' .or. & trim(type) == 'cpl2ice_run') then cpl2r = .true. - call seq_comm_setptrs(ID,cplpe=pebcast) logset = .true. endif @@ -2501,129 +2432,143 @@ subroutine seq_infodata_Exchange(infodata,ID,type) ! --- now execute exchange --- if (atm2cpli) then - call shr_mpi_bcast(infodata%atm_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%atm_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%atm_nx, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%atm_ny, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%atm_aero, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%atm_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%atm_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%atm_nx, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%atm_ny, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%atm_aero, mpicom, pebcast=cmppe) ! dead_comps is true if it's ever set to true deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom,pebcast=pebcast) + call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. endif if (lnd2cpli) then - call shr_mpi_bcast(infodata%lnd_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%lnd_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%lnd_nx, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%lnd_ny, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%lnd_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%lnd_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%lnd_nx, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%lnd_ny, mpicom, pebcast=cmppe) ! dead_comps is true if it's ever set to true deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom,pebcast=pebcast) + call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. endif if (rof2cpli) then - call shr_mpi_bcast(infodata%rof_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%rofice_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%rof_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%rof_nx, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%rof_ny, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%flood_present, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%rof_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%rofice_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%rof_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%rof_nx, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%rof_ny, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%flood_present, mpicom, pebcast=cmppe) ! dead_comps is true if it's ever set to true deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom,pebcast=pebcast) + call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. 
endif if (ocn2cpli) then - call shr_mpi_bcast(infodata%ocn_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ocn_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ocnrof_prognostic,mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ocn_nx, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ocn_ny, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%ocn_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%ocn_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%ocnrof_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%ocn_nx, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%ocn_ny, mpicom, pebcast=cmppe) ! dead_comps is true if it's ever set to true deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom,pebcast=pebcast) + call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. endif if (ice2cpli) then - call shr_mpi_bcast(infodata%ice_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ice_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%iceberg_prognostic,mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ice_nx, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ice_ny, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%ice_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%ice_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%iceberg_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%ice_nx, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%ice_ny, mpicom, pebcast=cmppe) ! dead_comps is true if it's ever set to true deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom,pebcast=pebcast) + call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. endif if (glc2cpli) then - call shr_mpi_bcast(infodata%glc_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glclnd_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glcocn_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glcice_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glc_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glc_nx, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glc_ny, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%glc_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%glclnd_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%glcocn_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%glcice_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%glc_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%glc_nx, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%glc_ny, mpicom, pebcast=cmppe) ! dead_comps is true if it's ever set to true deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom,pebcast=pebcast) + call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. 
endif if (wav2cpli) then - call shr_mpi_bcast(infodata%wav_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%wav_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%wav_nx, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%wav_ny, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%wav_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%wav_prognostic, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%wav_nx, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%wav_ny, mpicom, pebcast=cmppe) ! dead_comps is true if it's ever set to true deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom,pebcast=pebcast) + call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. endif + if (esp2cpli) then + call shr_mpi_bcast(infodata%esp_present, mpicom, pebcast=cmppe) + call shr_mpi_bcast(infodata%esp_prognostic, mpicom, pebcast=cmppe) + call seq_infodata_pauseresume_bcast(infodata, mpicom, pebcast=cmppe) + endif + if (cpl2i) then - call shr_mpi_bcast(infodata%atm_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%atm_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%lnd_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%lnd_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%rof_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%rofice_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%rof_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%flood_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ocn_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ocn_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ocnrof_prognostic,mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ice_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%ice_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%iceberg_prognostic,mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glc_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glclnd_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glcocn_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glcice_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glc_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%wav_present, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%wav_prognostic, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%dead_comps, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%atm_aero, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%atm_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%atm_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%lnd_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%lnd_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%rof_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%rofice_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%rof_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%flood_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%ocn_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%ocn_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%ocnrof_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%ice_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%ice_prognostic, mpicom, pebcast=cplpe) + call 
shr_mpi_bcast(infodata%iceberg_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%glc_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%glclnd_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%glcocn_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%glcice_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%glc_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%wav_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%wav_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%esp_present, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%esp_prognostic, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%dead_comps, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%atm_aero, mpicom, pebcast=cplpe) endif + ! Run-time data exchanges if (atm2cplr) then - call shr_mpi_bcast(infodata%nextsw_cday, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%nextsw_cday, mpicom, pebcast=cmppe) endif if (ocn2cplr) then - call shr_mpi_bcast(infodata%precip_fact, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%precip_fact, mpicom, pebcast=cmppe) + endif + + if (esp2cplr) then + call seq_infodata_pauseresume_bcast(infodata, mpicom, pebcast=cmppe) endif if (cpl2r) then - call shr_mpi_bcast(infodata%nextsw_cday, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%precip_fact, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glcrun_alarm, mpicom,pebcast=pebcast) - call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%nextsw_cday, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%precip_fact, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%glcrun_alarm, mpicom, pebcast=cplpe) + call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom, pebcast=cplpe) + call seq_infodata_pauseresume_bcast(infodata, mpicom, pebcast=cplpe) endif end subroutine seq_infodata_Exchange @@ -2754,33 +2699,6 @@ subroutine seq_infodata_Check( infodata ) call shr_sys_abort(subname//': vect_map invalid = '//trim(infodata%vect_map)) endif - if (associated(infodata%pause_resume)) then - if (infodata%pause_resume%atm_pause .and. ANY(len_trim(infodata%pause_resume%atm_resume) > 0)) then - call shr_sys_abort(subname//': If atm_pause is .true., then atm_resume should not be set') - end if - if (infodata%pause_resume%lnd_pause .and. ANY(len_trim(infodata%pause_resume%lnd_resume) > 0)) then - call shr_sys_abort(subname//': If lnd_pause is .true., then lnd_resume should not be set') - end if - if (infodata%pause_resume%ocn_pause .and. ANY(len_trim(infodata%pause_resume%ocn_resume) > 0)) then - call shr_sys_abort(subname//': If ocn_pause is .true., then ocn_resume should not be set') - end if - if (infodata%pause_resume%ice_pause .and. ANY(len_trim(infodata%pause_resume%ice_resume) > 0)) then - call shr_sys_abort(subname//': If ice_pause is .true., then ice_resume should not be set') - end if - if (infodata%pause_resume%rof_pause .and. ANY(len_trim(infodata%pause_resume%rof_resume) > 0)) then - call shr_sys_abort(subname//': If rof_pause is .true., then rof_resume should not be set') - end if - if (infodata%pause_resume%glc_pause .and. ANY(len_trim(infodata%pause_resume%glc_resume) > 0)) then - call shr_sys_abort(subname//': If glc_pause is .true., then glc_resume should not be set') - end if - if (infodata%pause_resume%wav_pause .and. 
ANY(len_trim(infodata%pause_resume%wav_resume) > 0)) then - call shr_sys_abort(subname//': If wav_pause is .true., then wav_resume should not be set') - end if - if (infodata%pause_resume%cpl_pause .and. (len_trim(infodata%pause_resume%cpl_resume) > 0)) then - call shr_sys_abort(subname//': If cpl_pause is .true., then cpl_resume should not be set') - end if - end if - END SUBROUTINE seq_infodata_Check !=============================================================================== @@ -2995,14 +2913,6 @@ SUBROUTINE seq_infodata_print( infodata ) write(logunit,F0L) subname,'glcrun_alarm = ', infodata%glcrun_alarm write(logunit,F0L) subname,'glc_g2lupdate = ', infodata%glc_g2lupdate if (associated(infodata%pause_resume)) then - write(logunit,F0L) subname,'atm_pause = ', infodata%pause_resume%atm_pause - write(logunit,F0L) subname,'lnd_pause = ', infodata%pause_resume%lnd_pause - write(logunit,F0L) subname,'ocn_pause = ', infodata%pause_resume%ocn_pause - write(logunit,F0L) subname,'ice_pause = ', infodata%pause_resume%ice_pause - write(logunit,F0L) subname,'glc_pause = ', infodata%pause_resume%glc_pause - write(logunit,F0S) subname,'rof_pause = ', infodata%pause_resume%rof_pause - write(logunit,F0L) subname,'wav_pause = ', infodata%pause_resume%wav_pause - write(logunit,F0L) subname,'cpl_pause = ', infodata%pause_resume%cpl_pause do ind = 1, num_inst_atm if (len_trim(infodata%pause_resume%atm_resume(ind)) > 0) then write(logunit,FIA) subname,'atm_resume(',ind,') = ', trim(infodata%pause_resume%atm_resume(ind)) diff --git a/src/drivers/mct/shr/seq_timemgr_mod.F90 b/src/drivers/mct/shr/seq_timemgr_mod.F90 index 8a297ce9e048..f985a53a4e2b 100644 --- a/src/drivers/mct/shr/seq_timemgr_mod.F90 +++ b/src/drivers/mct/shr/seq_timemgr_mod.F90 @@ -60,9 +60,11 @@ module seq_timemgr_mod public :: seq_timemgr_restartAlarmIsOn ! Is a restart alarm ringing public :: seq_timemgr_stopAlarmIsOn ! Is a stop alarm ringing public :: seq_timemgr_historyAlarmIsOn ! Is a history alarm ringing - - ! --- Data that belongs to the driver (but is here to avoid loops) - character(SHR_KIND_CS),public :: seq_timemgr_pause_component_list ! Pause - resume components + public :: seq_timemgr_pauseAlarmIsOn ! Is a pause alarm ringing + ! --- ESP components need to know about the state of other components + public :: seq_timemgr_pause_active ! Pause/resume is enabled + public :: seq_timemgr_pause_component_index ! Index of named component + public :: seq_timemgr_pause_component_active ! .true. is comp should pause ! ! PUBLIC PARAMETERS: @@ -208,6 +210,9 @@ module seq_timemgr_mod seq_timemgr_alarm_pause = 'seq_timemgr_alarm_pause ', & seq_timemgr_alarm_barrier = 'seq_timemgr_alarm_barrier ' + ! Active pause - resume components + logical, private :: pause_active(max_clocks) = .false. + type EClock_pointer ! needed for array of pointers type(ESMF_Clock),pointer :: EClock => null() end type EClock_pointer @@ -217,13 +222,15 @@ module seq_timemgr_mod type(ESMF_Alarm) :: EAlarm(max_clocks,max_alarms) ! array of clock alarms end type seq_timemgr_type - ! --- Private local data ------------------------------------------------------- + ! --- Private local data ---------------------------------------------------- + + type(ESMF_Calendar), target, save :: seq_timemgr_cal ! calendar + character(SHR_KIND_CL) ,save :: seq_timemgr_calendar ! calendar string + integer, parameter :: SecPerDay = 86400 ! Seconds per day - type(ESMF_Calendar), target, save :: seq_timemgr_cal ! calendar - character(SHR_KIND_CL) ,save :: seq_timemgr_calendar ! 
calendar string - logical :: seq_timemgr_end_restart ! write restarts at end of run? - logical, save :: seq_timemgr_setCalendar = .false. ! if calendar has been set - integer, parameter :: SecPerDay = 86400 ! Seconds per day + integer :: seq_timemgr_pause_sig_index ! Index of pause comp with smallest dt + logical :: seq_timemgr_esp_run_on_pause ! Run ESP component on pause cycle + logical :: seq_timemgr_end_restart ! write restarts at end of run? !=============================================================================== @@ -244,8 +251,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi Eclock_rof, EClock_wav, Eclock_esp) ! !USES: - use pio, only : file_desc_T - use shr_string_mod, only : shr_string_toupper + use pio, only : file_desc_T use shr_file_mod, only : shr_file_getunit, shr_file_freeunit use shr_mpi_mod, only : shr_mpi_bcast use seq_io_read_mod @@ -280,10 +286,11 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi type(ESMF_Time) :: StopTime1 ! Stop time type(ESMF_Time) :: StopTime2 ! Stop time type(ESMF_TimeInterval) :: TimeStep ! Clock time-step - type(ESMF_TimeInterval) :: AlarmInterval ! Alarm interval type(ESMF_CalKind_Flag) :: esmf_caltype ! local esmf calendar integer :: rc ! Return code - integer :: n ! index + integer :: n, i ! index + logical :: found + integer :: min_dt ! smallest time step integer :: dtime(max_clocks) ! time-step to use integer :: offset(max_clocks) ! run offset integer :: unitn ! i/o unit number @@ -334,8 +341,8 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi integer(SHR_KIND_IN) :: wav_cpl_offset ! Wav coupling interval integer(SHR_KIND_IN) :: rof_cpl_offset ! Runoff coupling interval integer(SHR_KIND_IN) :: esp_cpl_offset ! Esp coupling interval + logical :: esp_run_on_pause ! Run ESP on pause cycle logical :: end_restart ! Write restart at end of run - integer(SHR_KIND_IN) :: nlUnit ! Namelist unit number integer(SHR_KIND_IN) :: ierr ! Return code character(len=*), parameter :: F0A = "(2A,A)" @@ -355,7 +362,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi atm_cpl_offset, lnd_cpl_offset, ocn_cpl_offset, & ice_cpl_offset, glc_cpl_dt, glc_cpl_offset, & wav_cpl_dt, wav_cpl_offset, esp_cpl_dt, esp_cpl_offset, & - rof_cpl_dt, rof_cpl_offset, end_restart + rof_cpl_dt, rof_cpl_offset, esp_run_on_pause, end_restart !------------------------------------------------------------------------------- ! Notes: !------------------------------------------------------------------------------- @@ -426,6 +433,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi rof_cpl_offset = 0 wav_cpl_offset = 0 esp_cpl_offset = 0 + esp_run_on_pause = .true. end_restart = .true. 
!--------------------------------------------------------------------------- @@ -534,6 +542,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi write(logunit,F0A) trim(subname),' pause_option = ',trim(pause_option) write(logunit,F0I) trim(subname),' pause_n = ',pause_n write(logunit,F0A) trim(subname),' pause_component_list = ',trim(pause_component_list) + write(logunit,F0L) trim(subname),' esp_run_on_pause = ',esp_run_on_pause write(logunit,F0A) trim(subname),' history_option = ',trim(history_option) write(logunit,F0I) trim(subname),' history_n = ',history_n write(logunit,F0I) trim(subname),' history_ymd = ',history_ymd @@ -655,6 +664,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi call shr_mpi_bcast( rof_cpl_offset, mpicom ) call shr_mpi_bcast( wav_cpl_offset, mpicom ) call shr_mpi_bcast( esp_cpl_offset, mpicom ) + call shr_mpi_bcast( esp_run_on_pause, mpicom ) call shr_mpi_bcast( end_restart, mpicom ) ! --- derive a couple things --- @@ -682,8 +692,60 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi ! --- Initialize generic stuff --- seq_timemgr_calendar = shr_cal_calendarName(calendar) + seq_timemgr_esp_run_on_pause = esp_run_on_pause seq_timemgr_end_restart = end_restart - seq_timemgr_pause_component_list = pause_component_list + ! --- Figure out which components (if any) are doing pause this run + rc = 1 + i = 1 + if (trim(pause_component_list) == 'all') then + pause_active = .true. + else if (trim(pause_component_list) == 'none') then + pause_active = .false. + else + do + i = scan(trim(pause_component_list(rc:)), ':') - 1 + if ((i < 0) .and. (len_trim(pause_component_list) >= rc)) then + i = len_trim(pause_component_list(rc:)) + end if + if (i > 0) then + found = .false. + do n = 1, max_clocks + if (pause_component_list(rc:rc+i-1) == trim(seq_timemgr_clocks(n))) then + pause_active(n) = .true. + found = .true. + exit + end if + end do + ! Special case for cpl -- synonym for drv + if ((.not. found) .and. (pause_component_list(rc:rc+i-1) == 'cpl')) then + pause_active(seq_timemgr_nclock_drv) = .true. + found = .true. + end if + if (.not. found) then + call shr_sys_abort(subname//': unknown pause component, '//pause_component_list(rc:rc+i-1)) + end if + rc = rc + i + if (pause_component_list(rc:rc) == ':') then + rc = rc + 1 + end if + if (rc >= len_trim(pause_component_list)) then + exit + end if + else + exit + end if + end do + end if + if ( ANY(pause_active) .and. & + (trim(pause_option) /= seq_timemgr_optNONE) .and. & + (trim(pause_option) /= seq_timemgr_optNever)) then + do n = 1, max_clocks + if (pause_active(n)) then + write(logunit, '(4a)') subname, ': Pause active for ', & + trim(seq_timemgr_clocks(n)),' component' + end if + end do + end if ! --- Create the new calendar if not already set ------ if ( trim(seq_timemgr_calendar) == trim(seq_timemgr_noleap)) then @@ -722,12 +784,29 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi dtime(seq_timemgr_nclock_drv) = maxval(dtime) dtime(seq_timemgr_nclock_drv) = minval(dtime) + ! --- For figuring pause cycle + min_dt = maxval(dtime) + seq_timemgr_pause_sig_index = -1 + do n = 1,max_clocks if ( mod(dtime(n),dtime(seq_timemgr_nclock_drv)) /= 0) then write(logunit,*) trim(subname),' ERROR: dtime inconsistent = ',dtime call shr_sys_abort( subname//' :coupling intervals not compatible' ) endif + if (pause_active(n) .and. 
(dtime(n) < min_dt)) then + min_dt = dtime(n) + seq_timemgr_pause_sig_index = n + end if enddo + if (ANY(pause_active)) then + if (seq_timemgr_pause_sig_index < 1) then + write(logunit, *) subname,"ERROR: No pause_sig_index even with active pause" + call shr_sys_abort(subname//"ERROR: No pause_sig_index even with active pause") + end if + else + ! Don't try to run ESP on non-existent pauses + seq_timemgr_esp_run_on_pause = .false. + end if ! --- Initialize clocks and alarms --- @@ -739,8 +818,9 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_run), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(n), RefTime = CurrTime, & + option = seq_timemgr_optNSeconds, & + opt_n = dtime(n), & + RefTime = CurrTime, & alarmname = trim(seq_timemgr_alarm_run)) call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & @@ -757,7 +837,7 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi option = seq_timemgr_optDate, & opt_ymd = stop_ymd, & opt_tod = stop_tod, & - RefTime = StartTime, & + RefTime = StartTime, & alarmname = trim(seq_timemgr_alarm_datestop)) call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & @@ -809,12 +889,21 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi endif ! Set the pause option if pause/resume is active - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_pause), & - option = pause_option, & - opt_n = pause_n, & - RefTime = StartTime, & - alarmname = trim(seq_timemgr_alarm_pause)) + if (pause_active(n)) then + call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & + EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_pause), & + option = pause_option, & + opt_n = pause_n, & + RefTime = CurrTime, & + alarmname = trim(seq_timemgr_alarm_pause)) + else + call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & + EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_pause), & + option = seq_timemgr_optNever, & + opt_n = -1, & + RefTime = StartTime, & + alarmname = trim(seq_timemgr_alarm_pause)) + endif enddo @@ -901,16 +990,6 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi RefTime = OffsetTime, & alarmname = trim(seq_timemgr_alarm_wavrun)) - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_esp), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_esprun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_esp), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_esprun)) - - ! 
--- this is the glcrun alarm (there ^) offset by a -dtime of the driver call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_glc), rc=rc ) OffsetTime = CurrTime + TimeStep call ESMF_TimeIntervalSet( TimeStep, s=-offset(seq_timemgr_nclock_drv), rc=rc ) @@ -943,6 +1022,15 @@ subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioi RefTime = OffsetTime, & alarmname = trim(seq_timemgr_alarm_ocnnext)) + call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_esp), rc=rc ) + OffsetTime = CurrTime + TimeStep + call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & + EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_esprun), & + option = seq_timemgr_optNSeconds, & + opt_n = dtime(seq_timemgr_nclock_esp), & + RefTime = OffsetTime, & + alarmname = trim(seq_timemgr_alarm_esprun)) + end subroutine seq_timemgr_clockInit !=============================================================================== @@ -1008,9 +1096,6 @@ subroutine seq_timemgr_EClockGetData( EClock, curr_yr, curr_mon, curr_day, & integer(SHR_KIND_IN) :: ymd ! Date (YYYYMMDD) integer(SHR_KIND_IN) :: tod ! time of day (sec) integer(SHR_KIND_IN) :: ldtime ! local dtime - integer(SHR_KIND_IN) :: intyrs ! alarm variable - integer(SHR_KIND_IN) :: intmon ! alarm variable - integer(SHR_KIND_IN) :: intsec ! alarm variable integer(SHR_KIND_IN) :: days ! number of whole days in time interval integer(SHR_KIND_IN) :: seconds ! number of seconds in time interval integer(SHR_KIND_IN) :: acount ! number of valid alarms @@ -1205,6 +1290,16 @@ subroutine seq_timemgr_clockAdvance( SyncClock, force_stop, force_stop_ymd, forc call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from esp ESMF_ClockAdvance") endif + ! Special handling of ESP component if linked to pause cycles + if (seq_timemgr_esp_run_on_pause) then + ! We need to figure out if any pause clock is ringing + call seq_timemgr_alarmSetOff(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock,seq_timemgr_alarm_esprun) + if (seq_timemgr_alarmIsOn(SyncClock%ECP(seq_timemgr_pause_sig_index)%EClock,seq_timemgr_alarm_pause)) then + call seq_timemgr_alarmSetOn(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock,seq_timemgr_alarm_esprun) + end if + end if + + ! Special handling of restart alarm if end_restart is .true. if (seq_timemgr_end_restart) then do n = 1,max_clocks if (seq_timemgr_alarmIsOn(SyncClock%ECP(n)%EClock,seq_timemgr_alarm_stop) .or. & @@ -1254,7 +1349,6 @@ subroutine seq_timemgr_alarmInit( EClock, EAlarm, option, opt_n, opt_ymd, opt_to logical :: update_nextalarm ! update next alarm type(ESMF_Time) :: CurrTime ! Current Time type(ESMF_Time) :: NextAlarm ! Next restart alarm time - type(ESMF_Time) :: AltAlarm ! Alternate alarm time type(ESMF_TimeInterval) :: AlarmInterval ! Alarm interval !------------------------------------------------------------------------------- @@ -1562,7 +1656,6 @@ subroutine seq_timemgr_AlarmSetOn( EClock, alarmname) logical :: found logical :: set character(len=64) :: name - type(ESMF_Alarm),pointer :: EAlarm type(ESMF_Alarm),pointer :: EAlarm_list(:) integer(SHR_KIND_IN) :: AlarmCount ! Number of valid alarms @@ -1646,7 +1739,6 @@ subroutine seq_timemgr_AlarmSetOff( EClock, alarmname) logical :: found logical :: set character(len=64) :: name - type(ESMF_Alarm),pointer :: EAlarm type(ESMF_Alarm),pointer :: EAlarm_list(:) integer(SHR_KIND_IN) :: AlarmCount ! 
Number of valid alarms @@ -1728,10 +1820,8 @@ logical function seq_timemgr_alarmIsOn( EClock, alarmname) integer :: n integer :: rc logical :: found - logical :: set character(len=64) :: name type(ESMF_Time) :: ETime1, ETime2 - type(ESMF_Alarm),pointer :: EAlarm type(ESMF_Alarm),pointer :: EAlarm_list(:) integer(SHR_KIND_IN) :: AlarmCount ! Number of valid alarms @@ -1806,7 +1896,6 @@ logical function seq_timemgr_restartAlarmIsOn( EClock) !EOP !----- local ----- - integer :: rc character(len=*), parameter :: subname = '(seq_timemgr_restartAlarmIsOn) ' !------------------------------------------------------------------------------- @@ -1871,7 +1960,6 @@ logical function seq_timemgr_historyAlarmIsOn( EClock) !EOP !----- local ----- - integer :: rc character(len=*), parameter :: subname = '(seq_timemgr_historyAlarmIsOn) ' !------------------------------------------------------------------------------- @@ -1883,6 +1971,137 @@ logical function seq_timemgr_historyAlarmIsOn( EClock) end function seq_timemgr_historyAlarmIsOn +!=============================================================================== +!=============================================================================== +! !IROUTINE: seq_timemgr_pauseAlarmIsOn -- check if an alarm is ringing +! +! !DESCRIPTION: +! +! check if an alarm is ringing +! +! !INTERFACE: ------------------------------------------------------------------ + +logical function seq_timemgr_pauseAlarmIsOn( EClock) + + implicit none + +! !INPUT/OUTPUT PARAMETERS: + + type(ESMF_Clock) , intent(IN) :: EClock ! clock/alarm + +!EOP + + !----- local ----- + character(len=*), parameter :: subname = '(seq_timemgr_pauseAlarmIsOn) ' + +!------------------------------------------------------------------------------- +! Notes: +!------------------------------------------------------------------------------- + + seq_timemgr_pauseAlarmIsOn = & + seq_timemgr_alarmIsOn(EClock, alarmname=seq_timemgr_alarm_pause) + + end function seq_timemgr_pauseAlarmIsOn + +!=============================================================================== +!=============================================================================== +! !IROUTINE: seq_timemgr_pause_active -- Is pause/resume active this run? +! +! !DESCRIPTION: +! +! Return .true. if any component is configured for pause/resume +! +! !INTERFACE: ------------------------------------------------------------------ + + logical function seq_timemgr_pause_active() + +! !INPUT/OUTPUT PARAMETERS: + +!EOP + +!------------------------------------------------------------------------------- +! Notes: +!------------------------------------------------------------------------------- + + seq_timemgr_pause_active = ANY(pause_active) + + end function seq_timemgr_pause_active + +!=============================================================================== +!=============================================================================== +! !IROUTINE: seq_timemgr_pause_component_index -- return an index for a component +! +! !DESCRIPTION: +! +! Look up a component's internal index for faster processing +! +! !INTERFACE: ------------------------------------------------------------------ + + integer function seq_timemgr_pause_component_index(component_name) + +! 
!INPUT/OUTPUT PARAMETERS: + + character(len=*), intent(IN) :: component_name + +!EOP + + !----- local ----- + integer :: ind + character(len=*), parameter :: subname = '(seq_timemgr_pause_component_index) ' + +!------------------------------------------------------------------------------- +! Notes: +!------------------------------------------------------------------------------- + seq_timemgr_pause_component_index = 0 + do ind = 1, max_clocks + if (trim(component_name) == trim(seq_timemgr_clocks(ind))) then + seq_timemgr_pause_component_index = ind + exit + end if + end do + if (seq_timemgr_pause_component_index < 1) then + if (trim(component_name) == 'cpl') then + seq_timemgr_pause_component_index = seq_timemgr_nclock_drv + end if + end if + if (seq_timemgr_pause_component_index < 1) then + call shr_sys_abort(subname//': No index for component '//trim(component_name)) + end if + + end function seq_timemgr_pause_component_index + +!=============================================================================== +!=============================================================================== +! !IROUTINE: seq_timemgr_pause_component_active -- Check if component paused +! +! !DESCRIPTION: +! +! Return .true. if component is active in driver pause +! +! !INTERFACE: ------------------------------------------------------------------ + + logical function seq_timemgr_pause_component_active(component_index) + +! !INPUT/OUTPUT PARAMETERS: + + integer, intent(IN) :: component_index + +!EOP + + !----- local ----- + character(len=*), parameter :: subname = '(seq_timemgr_pause_component_active) ' + +!------------------------------------------------------------------------------- +! Notes: +!------------------------------------------------------------------------------- + + if ((component_index < 1) .or. (component_index > max_clocks)) then + call shr_sys_abort(subname//': component_index out of range') + end if + seq_timemgr_pause_component_active = pause_active(component_index) + + end function seq_timemgr_pause_component_active + !=============================================================================== !=============================================================================== ! !IROUTINE: seq_timemgr_ETimeInit -- Create ESMF_Time object based on YMD values From b5dc7b79b5d383377f8caa6d51314b463e13e2e4 Mon Sep 17 00:00:00 2001 From: Steve Goldhaber Date: Wed, 12 Apr 2017 22:47:07 -0600 Subject: [PATCH 016/219] Make sure ESP runs on pause for PRE test. 
Add missing units to driver buildnml --- scripts/lib/CIME/SystemTests/pre.py | 1 + src/drivers/mct/cime_config/buildnml | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/scripts/lib/CIME/SystemTests/pre.py b/scripts/lib/CIME/SystemTests/pre.py index 7917a24a9925..ceadb12641d6 100644 --- a/scripts/lib/CIME/SystemTests/pre.py +++ b/scripts/lib/CIME/SystemTests/pre.py @@ -50,6 +50,7 @@ def _case_two_setup(self): # Set up a pause/resume run self._case.set_value("STOP_OPTION", self._stopopt) self._case.set_value("STOP_N", self._stopn) + self._case.set_value("ESP_RUN_ON_PAUSE", "TRUE") if self._stopn > 3: pausen = 2 else: diff --git a/src/drivers/mct/cime_config/buildnml b/src/drivers/mct/cime_config/buildnml index 0296d662e8c4..da09544ddda0 100755 --- a/src/drivers/mct/cime_config/buildnml +++ b/src/drivers/mct/cime_config/buildnml @@ -31,6 +31,10 @@ def _get_time_in_seconds(time, unit): dmult = 30 * 24 * 3600 elif 'nday' in unit: dmult = 24 * 3600 + elif 'nhour' in unit: + dmult = 3600 + elif 'nminute' in unit: + dmult = 60 else: dmult = 1 From 65d8e336a500017b871daf313acc74be928ae497 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 13 Apr 2017 08:43:22 -0700 Subject: [PATCH 017/219] some progress on cori --- config/cesm/machines/config_batch.xml | 2 +- config/cesm/machines/config_compilers.xml | 2 +- config/cesm/machines/config_machines.xml | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/config/cesm/machines/config_batch.xml b/config/cesm/machines/config_batch.xml index 419cd046ecbc..a1008a91d0dc 100644 --- a/config/cesm/machines/config_batch.xml +++ b/config/cesm/machines/config_batch.xml @@ -383,7 +383,7 @@ sbatch - -C knl + -C knl,quad,cache regular diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index f9890da41c4f..7d3de067ca76 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -566,7 +566,7 @@ using a fortran linker. -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - -DHAVE_PAPI -DHAVE_SLASHPROC + -DHAVE_SLASHPROC -mkl -lmemkind -zmuldefs diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index 276e0cae774f..4c56bdd4f3ae 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -432,7 +432,7 @@ PrgEnv-cray - cce cce/8.5.4 + cce cce/8.5.7 PrgEnv-gnu @@ -535,7 +535,7 @@ PrgEnv-cray - cce cce/8.5.4 + cce cce/8.5.7 PrgEnv-gnu @@ -544,7 +544,7 @@ cray-memkind craype-mic-knl - papi/5.4.3.2 + craype craype/2.5.7 From 537938ffa30810425cdae9d1decce702b8d4bceb Mon Sep 17 00:00:00 2001 From: jayeshkrishna Date: Thu, 13 Apr 2017 13:22:46 -0500 Subject: [PATCH 018/219] Dont point to pio2 cmake modules by default Remove assumptions in the pio1 configure files about the existense of pio2 (pio2 cmake modules). 
The old code assumed that pio1 always built within CIME (and hence assumed that pio2 was available in a specific parent directory) Fixes #995 --- pio/CMakeLists.txt | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pio/CMakeLists.txt b/pio/CMakeLists.txt index 9779dec1a816..f871ca245a40 100644 --- a/pio/CMakeLists.txt +++ b/pio/CMakeLists.txt @@ -9,7 +9,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8.5) IF (USER_CMAKE_MODULE_PATH) SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${USER_CMAKE_MODULE_PATH}) ELSE() - SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../pio2/cmake") + SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") ENDIF() find_file( TESTFILE NAMES TryCSizeOf.f90 PATHS ${CMAKE_MODULE_PATH} NO_DEFAULT_PATH) get_filename_component( TESTFILEPATH ${TESTFILE} PATH) @@ -29,9 +29,6 @@ IF(${WITH_CSIZEOF} STREQUAL FALSE) endif() # Netcdf is required - -SET (CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../pio2/cmake" ${CMAKE_MODULE_PATH}) - #SET (NETCDF_FIND_COMPONENTS F90) FIND_PACKAGE(NetCDF "4.3.3" COMPONENTS Fortran) IF (${NetCDF_Fortran_FOUND}) From 40a43331836eb7557784520758e016de1988fef6 Mon Sep 17 00:00:00 2001 From: Steve Goldhaber Date: Thu, 13 Apr 2017 12:28:42 -0600 Subject: [PATCH 019/219] Fixed pylint issue with unused output from cprnc --- scripts/lib/CIME/SystemTests/pre.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/SystemTests/pre.py b/scripts/lib/CIME/SystemTests/pre.py index ceadb12641d6..3553f7500ce8 100644 --- a/scripts/lib/CIME/SystemTests/pre.py +++ b/scripts/lib/CIME/SystemTests/pre.py @@ -102,7 +102,7 @@ def run_phase(self): expect((len(restart_files_2) == 1), "Missing case2 restart file, %s", glob_str) rfile2 = restart_files_2[0] - ok, out = cprnc(comp, rfile1, rfile2, self._case, rundir2) + ok, out = cprnc(comp, rfile1, rfile2, self._case, rundir2) # pylint: disable=unused-variable logger.warning("CPRNC result for %s: %s"%(os.path.basename(rfile1), "PASS" if (ok == should_match) else "FAIL")) compare_ok = compare_ok and (should_match == ok) From 0930b79a4a4c8217eb1fe787a7ed892d4c6fec89 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 13 Apr 2017 14:37:02 -0600 Subject: [PATCH 020/219] create_test was not handling user-selected projects correctly ... in the case where the test_root depended on PROJECT. --- scripts/lib/CIME/test_scheduler.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index b3f977fc988f..315364b78a34 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -118,6 +118,9 @@ def __init__(self, test_names, test_data=None, else: self._project = project + # Needed in case default root depends on PROJECT + self._machobj.set_value("PROJECT", project) + # We will not use batch system if user asked for no_batch or if current # machine is not a batch machine self._no_batch = no_batch or not self._machobj.has_batch_system() From 00d9e48d015648c4fef9139b668a22f18adbfff7 Mon Sep 17 00:00:00 2001 From: Michael Levy Date: Thu, 13 Apr 2017 15:12:52 -0600 Subject: [PATCH 021/219] Update domain_b in nnsm map Previously, the generated maps were 1. nn: runoff -> ocean 2. smooth: ocean -> ocean 3. nnsm: runoff -> ocean The metadata from nn was copied to nnsm, and then the title attribute was overwritten. Now the maps are 1. nn: runoff -> ocean (coastal) 2. smooth: ocean (coastal) -> ocean (global) 3. 
nnsm: runoff -> ocean (global) So the domain_b attribute differs between nn and nnsm and also needs to be overwritten (it should map domain_b from the smooth map). --- tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90 b/tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90 index a415e633ee41..e04319015797 100644 --- a/tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90 +++ b/tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90 @@ -208,6 +208,7 @@ PROGRAM main !--- create new map datatype to hold result of matrix-matrix multiply --- call map_dup(map_orig,map_new) map_new%title = trim(title) + map_new%domain_b = trim(map_smooth%domain_b) call map_matMatMult(map_orig,map_new,map_smooth) ! mult(A,B,S): B=S*A call mapsort_sort(map_new) call map_check(map_new) From af10dbe8884e5977af10f5d48db6a57c0cab4b8c Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 13 Apr 2017 16:12:39 -0600 Subject: [PATCH 022/219] New tool: get_key_commands --- scripts/Tools/get_key_commands | 63 +++++++++++++++++++++++++++++++ scripts/lib/CIME/XML/env_batch.py | 50 ++++++++++++++---------- scripts/lib/CIME/case.py | 5 ++- scripts/lib/CIME/case_setup.py | 13 +------ 4 files changed, 97 insertions(+), 34 deletions(-) create mode 100755 scripts/Tools/get_key_commands diff --git a/scripts/Tools/get_key_commands b/scripts/Tools/get_key_commands new file mode 100755 index 000000000000..32e67539846b --- /dev/null +++ b/scripts/Tools/get_key_commands @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +""" +Script to query key CIME shell commands (mpirun and batch submission). + +To force a certain mpirun command, use: +./xmlchange MPIRUN_OVERRIDE $your_cmd + +To force a certain qsub command, use: +./xmlchange SUBMIT_OVERRIDE $your_cmd +""" + +from standard_script_setup import * + +from CIME.case import Case + +############################################################################### +def parse_command_line(args, description): +############################################################################### + parser = argparse.ArgumentParser( + usage="""\n%s [--verbose] +OR +%s --help +OR +%s --test + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run the tool \033[0m + > %s +""" % ((os.path.basename(args[0]), ) * 4), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument("caseroot", nargs="?", default=os.getcwd(), + help="Case directory to build") + + args = parser.parse_args(args[1:]) + + return args.caseroot + +############################################################################### +def _main_func(description): +############################################################################### + if "--test" in sys.argv: + test_results = doctest.testmod(verbose=True) + sys.exit(1 if test_results.failed > 0 else 0) + + caseroot = parse_command_line(sys.argv, description) + + logging.disable(logging.CRITICAL) + + with Case(caseroot, read_only=False) as case: + print "BATCH SUBMIT:" + job = "case.test" if case.get_value("TEST") else "case.run" + job_id_to_cmd = case.submit_jobs(dry_run=True, job=job) + for job_id, cmd in job_id_to_cmd: + print " ", job_id, "->", case.get_resolved_value(cmd) + print + print "MPIRUN:", case.get_resolved_value(case.get_mpirun_cmd()) + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index 88fcdfc8f210..96e78a5df643 100644 --- 
a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -281,7 +281,7 @@ def get_submit_args(self, case, job): return submitargs - def submit_jobs(self, case, no_batch=False, job=None, batch_args=None): + def submit_jobs(self, case, no_batch=False, job=None, batch_args=None, dry_run=False): alljobs = self.get_jobs() startindex = 0 jobs = [] @@ -299,16 +299,23 @@ def submit_jobs(self, case, no_batch=False, job=None, batch_args=None): if prereq is None or job == firstjob: prereq = True else: + if dry_run: + # Assume build is complete + prereq = prereq.replace("$BUILD_COMPLETE", "True") prereq = case.get_resolved_value(prereq) prereq = eval(prereq) except: expect(False,"Unable to evaluate prereq expression '%s' for job '%s'"%(self.get_value('prereq',subgroup=job), job)) + if prereq: - jobs.append((job,self.get_value('dependency', subgroup=job))) + jobs.append((job, self.get_value('dependency', subgroup=job))) + if self.batchtype == "cobalt": break depid = {} + jobcmds = [] + for job, dependency in jobs: if dependency is not None: deps = dependency.split() @@ -331,16 +338,21 @@ def submit_jobs(self, case, no_batch=False, job=None, batch_args=None): if slen == 0: jobid = None - logger.warn("job is %s"%job) - depid[job] = self.submit_single_job(case, job, jobid, no_batch=no_batch, batch_args=batch_args) + logger.warn("job is %s" % job) + result = self._submit_single_job(case, job, jobid, no_batch=no_batch, batch_args=batch_args, dry_run=dry_run) + batch_job_id = str(alljobs.index(job)) if dry_run else result + depid[job] = batch_job_id + jobcmds.append( (job, result) ) if self.batchtype == "cobalt": break - return sorted(list(depid.values())) + if dry_run: + return jobcmds + else: + return sorted(list(depid.values())) - def submit_single_job(self, case, job, depid=None, no_batch=False, batch_args=None): + def _submit_single_job(self, case, job, depid=None, no_batch=False, batch_args=None, dry_run=False): logger.warn("Submit job %s"%job) - caseroot = case.get_value("CASEROOT") batch_system = self.get_value("BATCH_SYSTEM", subgroup=None) if batch_system is None or batch_system == "none" or no_batch: # Import here to avoid circular include @@ -351,14 +363,9 @@ def submit_single_job(self, case, job, depid=None, no_batch=False, batch_args=No logger.info("Starting job script %s" % job) - # Hack until all testcases are ported to python - testcase = case.get_value("TESTCASE") - cimeroot = get_cime_root() - testscript = os.path.join(cimeroot, "scripts", "Testing", "Testcases", "%s_script" % testcase) - if job == "case.test" and testcase is not None and os.path.exists(testscript): - run_cmd_no_fail("%s --caseroot %s" % (os.path.join(".", job), caseroot)) - else: - # This is what we want longterm + # This is what we want longterm + function_name = job.replace(".", "_") + if not dry_run: function_name = job.replace(".", "_") locals()[function_name](case) @@ -383,11 +390,14 @@ def submit_single_job(self, case, job, depid=None, no_batch=False, batch_args=No if string is not None: submitcmd += string + " " - logger.info("Submitting job script %s"%submitcmd) - output = run_cmd_no_fail(submitcmd, combine_output=True) - jobid = self.get_job_id(output) - logger.info("Submitted job id is %s"%jobid) - return jobid + if dry_run: + return submitcmd + else: + logger.info("Submitting job script %s"%submitcmd) + output = run_cmd_no_fail(submitcmd, combine_output=True) + jobid = self.get_job_id(output) + logger.info("Submitted job id is %s"%jobid) + return jobid def get_batch_system_type(self): 
nodes = self.get_nodes("batch_system") diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index ca727570fa78..1426f5a2a1b3 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -862,6 +862,7 @@ def _create_caseroot_tools(self): os.path.join(toolsdir, "lt_archive.sh"), os.path.join(toolsdir, "getTiming"), os.path.join(toolsdir, "save_provenance"), + os.path.join(toolsdir, "get_key_commands"), os.path.join(machines_dir,"Makefile"), os.path.join(machines_dir,"mkSrcfiles"), os.path.join(machines_dir,"mkDepends")] @@ -1062,9 +1063,9 @@ def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime return newcase - def submit_jobs(self, no_batch=False, job=None, batch_args=None): + def submit_jobs(self, no_batch=False, job=None, batch_args=None, dry_run=False): env_batch = self.get_env('batch') - return env_batch.submit_jobs(self, no_batch=no_batch, job=job, batch_args=batch_args) + return env_batch.submit_jobs(self, no_batch=no_batch, job=job, batch_args=batch_args, dry_run=dry_run) def get_mpirun_cmd(self, job="case.run"): env_mach_specific = self.get_env('mach_specific') diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index 94f3fccec876..f527665e17ab 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -165,10 +165,7 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, input_batch_script = os.path.join(case.get_value("MACHDIR"), env_batch.get_value('template', subgroup=job)) if job == "case.test" and testcase is not None and not test_mode: logger.info("Writing %s script" % job) - testscript = os.path.join(cimeroot, "scripts", "Testing", "Testcases", "%s_script" % testcase) - # Short term fix to be removed when csh tests are removed - if not os.path.exists(testscript): - env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count) + env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count) elif job != "case.test": logger.info("Writing %s script from input template %s" % (job, input_batch_script)) env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count) @@ -203,14 +200,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, logger.info("If an old case build already exists, might want to run \'case.build --clean\' before building") - # Create test script if appropriate - # Short term fix to be removed when csh tests are removed - if os.path.exists("env_test.xml"): - if not os.path.exists("case.test"): - logger.info("Starting testcase.setup") - run_cmd_no_fail("./testcase.setup -caseroot %s" % caseroot) - logger.info("Finished testcase.setup") - # Some tests need namelists created here (ERP) - so do this if are in test mode if test_mode or get_model() == "acme": logger.info("Generating component namelists as part of setup") From 116e1fcb4ad1d94f3b908f76aa670b903f17eb6a Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Fri, 14 Apr 2017 09:46:20 -0600 Subject: [PATCH 023/219] Update ChangeLog --- ChangeLog | 139 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) diff --git a/ChangeLog b/ChangeLog index e26826d94378..d30b0f50fcae 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,144 @@ ====================================================================== +Originator: Chris Fischer +Date: 4-14-2017 +Tag: cime5.3.0-alpha.07 +Answer Changes: None 
+Tests on various PRs: by-hand, code_checker, pylint, scripts_regression_tests, cime_developer with gnu compiler on cheyenne, + All gnu debug tests in CESM test suites, create B compset with ww3 at T31 resolution, J_TestCreateNewcase, +Dependencies: + +Brief Summary: + -Update domain_b in nnsm map + -Fix bug where create_test was not handling user-selected projects correctly in the case where the test_root depended on PROJECT. + -HOMME Improvement + -Unit testing: Determine machine-specific settings automatically + -Mapping/update runoff to ocn + -Fixes an issue with --user-compset flag + -Fixes for cheyenne issue + -Validate the case and test_id arguments + -Add more checks for gnu in debug mode. + -Avoid a problem with pio_typename in clone with keepexe. + -Fix blues NETCDFROOT unresolved error. + -Fix TOTAL_CORES. + -Refactor BUILD_THREADED to match documentation. + -Refactor run_cmd to handle file output streams more easily. + -More-robust way of getting path to create_test. + -Better content in TestStatus.log file. + -Improved formatting/content of TestStatus.log + -Better handling of poorly implemented batch systems. + -Fix atm grid match for T31 atm2wav map. + -Change flag for answer to create_newcase + -Fix bug in xmlquery, add regr test to catch in future + -Improve error message when user forgets --user-compset. + -Cleanup TEST_ROOT if everything works. + -Reduce newlines in CaseStatus. + +User interface changes: + -BUILD_THREADED behaves differently + -for create_newcase, --answer becomes --handle-preexisting-dirs + -Formatting changes to CaseStatus + +PR summary: git log --oneline --first-parent [previous_tag]..master +1f4c4e9 Merge pull request #1349 from mnlevy1981/mapping/runoff_metadata_bugfix +7664249 Merge pull request #1348 from ESMCI/jgfouca/fix_custom_proj_test_root +6156e0a Merge pull request #1346 from ESMCI/jgfouca/homme_improvement +3779ec1 Merge pull request #1343 from billsacks/run_tests_defaults +5074c4a Merge pull request #1280 from mnlevy1981/mapping/update_runoff_to_ocn +60bfc01 Merge pull request #1341 from jedwards4b/user-compset +a0d0f0e Update license of GPTL +5e79b04 Merge pull request #1340 from jedwards4b/cheyenne_fixes +37aac0f Merge pull request #1337 from jedwards4b/validate_case_and_test_id_args +5edf3fa Merge pull request #1339 from billsacks/gnu_debug_checks +82b6181 Merge pull request #1305 from jedwards4b/clone_keepexe_and_pio_typename +4d62d80 Merge pull request #1333 from ESMCI/sarich/fix-blues-netcdfroot +12bc9b3 Merge pull request #1336 from ESMCI/jgfouca/fix_total_cores +1adf8d1 Merge pull request #1335 from ESMCI/jgfouca/cleanup_build_threaded +315e849 Merge pull request #1326 from ESMCI/jgfouca/refactor_run_cmd +997ec60 Merge pull request #1321 from ESMCI/jgfouca/cime_bisect_robustness +cb7bd10 Merge pull request #1317 from ESMCI/jgfouca/better_testlog_content +7fb258b Merge pull request #1316 from ESMCI/jgfouca/handle_titan_batch_errs +2b4f7fe Merge pull request #1320 from billsacks/t31_wav_map +7370b02 Merge pull request #1311 from ESMCI/jgfouca/better_name_for_answer +b14ebe0 Merge pull request #1312 from ESMCI/jgfouca/fix_xmlquery_listall +f741180 Merge pull request #1314 from billsacks/user_compset_errmsg +243fafa Merge pull request #1310 from ESMCI/jgfouca/better_regr_cleanup +a4c7ab7 Merge pull request #1307 from ESMCI/jgfouca/remove_useless_newlines_from_casestatus + + +Modified files: git diff --name-status [previous_tag] + +M README.unit_testing +M config/acme/machines/config_machines.xml +M config/cesm/config_grids.xml +M 
config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M config/xml_schemas/config_machines.xsd +M scripts/Tools/cime_bisect +M scripts/Tools/xmlquery +M scripts/create_clone +M scripts/create_newcase +M scripts/create_test +M scripts/lib/CIME/SystemTests/erp.py +M scripts/lib/CIME/SystemTests/homme.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/SystemTests/system_tests_compare_two.py +M scripts/lib/CIME/XML/compsets.py +M scripts/lib/CIME/XML/entry_id.py +M scripts/lib/CIME/XML/env_base.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/aprun.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case.py +M scripts/lib/CIME/case_run.py +M scripts/lib/CIME/case_setup.py +M scripts/lib/CIME/case_st_archive.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/preview_namelists.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/tests/scripts_regression_tests.py +M src/drivers/mct/cime_config/config_component.xml +M src/externals/pio1/timing/COPYING +M src/externals/pio2/src/gptl/COPYING +M src/share/timing/COPYING +M src/share/util/mct_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL +M tools/mapping/gen_mapping_files/runoff_to_ocn/README +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_r05_to_gx1v7_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_r05_to_gx3v7_nnsm_e1000r500.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_rx1_to_gx1v7_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_rx1_to_gx3v7_nnsm_e1000r500.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_wr50a_to_ar9v4_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_wr50a_to_gx3v7_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/maps/.gitignore +A tools/mapping/gen_mapping_files/runoff_to_ocn/ncl/merge_mapping_files.ncl +A tools/mapping/gen_mapping_files/runoff_to_ocn/run_merge_mapping_files.sh +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_r05_gx3v7.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_rx1_gx1v6.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_tx01.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_wr50a_ar9v4.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_wr50a_gx3v7.nml +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/Makefile +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/fixroff_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/kind_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/map_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/mapsort_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/shr_kind_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/shr_sys_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/shr_timer_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/smooth_mod.F90 +M tools/unit_testing/run_tests.py + +====================================================================== + Originator:Chris Fischer Date: 4-4-2017 Tag: cime5.3.0-alpha.06 From 162e1d53f6821042633ab80e719172091cea83d1 Mon Sep 17 00:00:00 2001 From: 
Jim Edwards Date: Fri, 14 Apr 2017 08:50:56 -0700 Subject: [PATCH 024/219] updates for cori-knl --- config/cesm/machines/config_compilers.xml | 4 ++++ config/cesm/machines/config_machines.xml | 1 + 2 files changed, 5 insertions(+) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 7d3de067ca76..2c0848f1d519 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -549,6 +549,8 @@ using a fortran linker. -mkl + + FALSE @@ -571,6 +573,8 @@ using a fortran linker. -mkl -lmemkind -zmuldefs + + FALSE diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index 4c56bdd4f3ae..4b0044680cec 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -491,6 +491,7 @@ --label -n $TOTALPES + -c 4 --cpu-bind=cores From 9240cf287ebbec7868192263deb95516bc864876 Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Fri, 14 Apr 2017 10:02:48 -0600 Subject: [PATCH 025/219] Update ChangeLog --- ChangeLog | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index d30b0f50fcae..67b089d6b5b5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,13 +4,17 @@ Originator: Chris Fischer Date: 4-14-2017 Tag: cime5.3.0-alpha.07 Answer Changes: None -Tests on various PRs: by-hand, code_checker, pylint, scripts_regression_tests, cime_developer with gnu compiler on cheyenne, - All gnu debug tests in CESM test suites, create B compset with ww3 at T31 resolution, J_TestCreateNewcase, +Tests on various PRs: by-hand, code_checker, pylint, scripts_regression_tests, + cime_developer with gnu compiler on cheyenne, All gnu debug tests in CESM + test suites, create B compset with ww3 at T31 resolution, + J_TestCreateNewcase, some cesm tests on cori-knl Dependencies: Brief Summary: + -Updates to get going on cori-knl -Update domain_b in nnsm map - -Fix bug where create_test was not handling user-selected projects correctly in the case where the test_root depended on PROJECT. + -Fix bug where create_test was not handling user-selected projects correctly + in the case where the test_root depended on PROJECT. 
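In practice this is a single setting made in the case directory before the run is submitted; a minimal sketch, assuming a CISM-enabled case whose env_run.xml carries the GLC_AVG_PERIOD variable documented above:

    ./xmlchange GLC_AVG_PERIOD=yearly

With GLC_AVG_PERIOD left at 'glc_coupling_period', averaging is done on every GLC coupling interval and, as the note above says, the case can then only restart on year boundaries.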
-HOMME Improvement -Unit testing: Determine machine-specific settings automatically -Mapping/update runoff to ocn @@ -40,13 +44,13 @@ User interface changes: -Formatting changes to CaseStatus PR summary: git log --oneline --first-parent [previous_tag]..master +30243ac Merge pull request #1351 from jedwards4b/cori_update 1f4c4e9 Merge pull request #1349 from mnlevy1981/mapping/runoff_metadata_bugfix 7664249 Merge pull request #1348 from ESMCI/jgfouca/fix_custom_proj_test_root 6156e0a Merge pull request #1346 from ESMCI/jgfouca/homme_improvement 3779ec1 Merge pull request #1343 from billsacks/run_tests_defaults 5074c4a Merge pull request #1280 from mnlevy1981/mapping/update_runoff_to_ocn 60bfc01 Merge pull request #1341 from jedwards4b/user-compset -a0d0f0e Update license of GPTL 5e79b04 Merge pull request #1340 from jedwards4b/cheyenne_fixes 37aac0f Merge pull request #1337 from jedwards4b/validate_case_and_test_id_args 5edf3fa Merge pull request #1339 from billsacks/gnu_debug_checks @@ -67,10 +71,10 @@ a4c7ab7 Merge pull request #1307 from ESMCI/jgfouca/remove_useless_newlines_from Modified files: git diff --name-status [previous_tag] - M README.unit_testing M config/acme/machines/config_machines.xml M config/cesm/config_grids.xml +M config/cesm/machines/config_batch.xml M config/cesm/machines/config_compilers.xml M config/cesm/machines/config_machines.xml M config/cesm/machines/config_pio.xml From 8ce40ab71bc8fa9f9ff783efb2ede78093177527 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Fri, 14 Apr 2017 10:07:13 -0600 Subject: [PATCH 026/219] Modify some documentation of required settings for mid-year restarts with CISM --- src/drivers/mct/cime_config/config_component.xml | 7 +++---- src/drivers/mct/cime_config/config_component_cesm.xml | 3 +++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/drivers/mct/cime_config/config_component.xml b/src/drivers/mct/cime_config/config_component.xml index 75d815898b91..b1572e307257 100644 --- a/src/drivers/mct/cime_config/config_component.xml +++ b/src/drivers/mct/cime_config/config_component.xml @@ -303,7 +303,7 @@ run_begin_stop_restart env_run.xml - Sets the run length along with STOP_N and STOP_DATE (must be nyear(s) for _GLC compsets for restarts to work properly). + Sets the run length along with STOP_N and STOP_DATE @@ -335,8 +335,7 @@ run_begin_stop_restart env_run.xml - sets frequency of model restart writes (same options as STOP_OPTION) (must be nyear(s) for _GLC compsets) - (must be nyear(s) for _GLC compsets) + sets frequency of model restart writes (same options as STOP_OPTION) @@ -368,7 +367,7 @@ run_begin_stop_restart env_run.xml - Sets the pause frequency along with PAUSE_N (must be nyear(s) for _GLC compsets for restarts to resume properly). + Sets the pause frequency along with PAUSE_N diff --git a/src/drivers/mct/cime_config/config_component_cesm.xml b/src/drivers/mct/cime_config/config_component_cesm.xml index cf424aa34646..d6b93e4e6bf9 100644 --- a/src/drivers/mct/cime_config/config_component_cesm.xml +++ b/src/drivers/mct/cime_config/config_component_cesm.xml @@ -215,6 +215,9 @@ Setting GLC_AVG_PERIOD to 'glc_coupling_period' means that the averaging is done exactly when the GLC is called (governed by GLC_NCPL). + + IMPORTANT: In order to restart mid-year when running with CISM, you MUST specify GLC_AVG_PERIOD = 'yearly'. + If using GLC_AVG_PERIOD = 'glc_coupling_period' with CISM, you can only restart on year boundaries. 
From 1a1b4757ffbece598f2d79a7f81d9ca283bbcc40 Mon Sep 17 00:00:00 2001 From: Steve Goldhaber Date: Fri, 14 Apr 2017 12:33:27 -0500 Subject: [PATCH 027/219] Remove unused variable, modify initialization to clarify purpose. Move utility to turn time interval into seconds from driver buildnml to utils. Change PRE test to coarse grid for speedup. --- config/config_tests.xml | 2 ++ scripts/lib/CIME/SystemTests/pre.py | 5 ++--- scripts/lib/CIME/utils.py | 19 +++++++++++++++++++ scripts/lib/update_acme_tests.py | 2 +- src/drivers/mct/cime_config/buildnml | 22 ++-------------------- 5 files changed, 26 insertions(+), 24 deletions(-) diff --git a/config/config_tests.xml b/config/config_tests.xml index 3670b47b7e69..7532b2509030 100644 --- a/config/config_tests.xml +++ b/config/config_tests.xml @@ -158,6 +158,8 @@ LAR long term archive test DAE data assimilation test: non answer changing +PRE pause-resume test: by default a BFB test of pause-resume cycling + ====================================================================== Other component-specific tests ====================================================================== diff --git a/scripts/lib/CIME/SystemTests/pre.py b/scripts/lib/CIME/SystemTests/pre.py index 3553f7500ce8..5a9e8644f650 100644 --- a/scripts/lib/CIME/SystemTests/pre.py +++ b/scripts/lib/CIME/SystemTests/pre.py @@ -33,9 +33,8 @@ def __init__(self, case): run_two_suffix='pr', run_one_description='no pause/resume', run_two_description='pause/resume') - self._stopopt = 'ndays' - self._stopn = 5 - self._pausediv = 5 # Number of pause cycles per run + self._stopopt = '' + self._stopn = 0 ########################################################################### def _case_one_setup(self): diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 46ca148024f0..bce74cf9f362 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -573,6 +573,25 @@ def get_timestamp(timestamp_format="%Y%m%d_%H%M%S", utc_time=False): time_tuple = time.localtime() return time.strftime(timestamp_format, time_tuple) +def get_time_in_seconds(time, unit): + """ + Convert a time from 'unit' to seconds + """ + if 'nyear' in unit: + dmult = 365 * 24 * 3600 + elif 'nmonth' in unit: + dmult = 30 * 24 * 3600 + elif 'nday' in unit: + dmult = 24 * 3600 + elif 'nhour' in unit: + dmult = 3600 + elif 'nminute' in unit: + dmult = 60 + else: + dmult = 1 + + return dmult * time + def get_project(machobj=None): """ Hierarchy for choosing PROJECT: diff --git a/scripts/lib/update_acme_tests.py b/scripts/lib/update_acme_tests.py index cdfd7c670733..e7592980beec 100644 --- a/scripts/lib/update_acme_tests.py +++ b/scripts/lib/update_acme_tests.py @@ -49,7 +49,7 @@ "SMS_D_Ln9.f19_g16_rx1.A", "DAE.f19_f19.A", "SMS.T42_T42.S", - "PRE.f19_f19.ADESP") + "PRE.f45_g37_rx1.ADESP") ), # diff --git a/src/drivers/mct/cime_config/buildnml b/src/drivers/mct/cime_config/buildnml index da09544ddda0..94c61cdc6018 100755 --- a/src/drivers/mct/cime_config/buildnml +++ b/src/drivers/mct/cime_config/buildnml @@ -16,30 +16,12 @@ from standard_script_setup import * from CIME.case import Case from CIME.nmlgen import NamelistGenerator from CIME.utils import expect -from CIME.utils import run_cmd_no_fail, get_model +from CIME.utils import run_cmd_no_fail, get_model, get_time_in_seconds from CIME.buildnml import create_namelist_infile, parse_input from CIME.XML.files import Files logger = logging.getLogger(__name__) -############################################################################### -def 
_get_time_in_seconds(time, unit): -############################################################################### - if 'nyear' in unit: - dmult = 365 * 24 * 3600 - elif 'nmonth' in unit: - dmult = 30 * 24 * 3600 - elif 'nday' in unit: - dmult = 24 * 3600 - elif 'nhour' in unit: - dmult = 3600 - elif 'nminute' in unit: - dmult = 60 - else: - dmult = 1 - - return dmult * time - ############################################################################### def _create_drv_namelists(case, infile, confdir, nmlgen, files): ############################################################################### @@ -179,7 +161,7 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): if 'nstep' in pauseo: esp_time = mindt else: - esp_time = _get_time_in_seconds(pausen, pauseo) + esp_time = get_time_in_seconds(pausen, pauseo) nmlgen.set_value('esp_cpl_dt', value=esp_time) # End if pause is active From 1d3974659361a7359ceee472272a27f94eb08475 Mon Sep 17 00:00:00 2001 From: Steve Goldhaber Date: Fri, 14 Apr 2017 11:54:03 -0600 Subject: [PATCH 028/219] Fixed pylint issue --- scripts/lib/CIME/utils.py | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index bce74cf9f362..e370435fb7c2 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -573,25 +573,6 @@ def get_timestamp(timestamp_format="%Y%m%d_%H%M%S", utc_time=False): time_tuple = time.localtime() return time.strftime(timestamp_format, time_tuple) -def get_time_in_seconds(time, unit): - """ - Convert a time from 'unit' to seconds - """ - if 'nyear' in unit: - dmult = 365 * 24 * 3600 - elif 'nmonth' in unit: - dmult = 30 * 24 * 3600 - elif 'nday' in unit: - dmult = 24 * 3600 - elif 'nhour' in unit: - dmult = 3600 - elif 'nminute' in unit: - dmult = 60 - else: - dmult = 1 - - return dmult * time - def get_project(machobj=None): """ Hierarchy for choosing PROJECT: @@ -807,6 +788,25 @@ def convert_to_babylonian_time(seconds): return "%02d:%02d:%02d" % (hours, minutes, seconds) +def get_time_in_seconds(timeval, unit): + """ + Convert a time from 'unit' to seconds + """ + if 'nyear' in unit: + dmult = 365 * 24 * 3600 + elif 'nmonth' in unit: + dmult = 30 * 24 * 3600 + elif 'nday' in unit: + dmult = 24 * 3600 + elif 'nhour' in unit: + dmult = 3600 + elif 'nminute' in unit: + dmult = 60 + else: + dmult = 1 + + return dmult * timeval + def compute_total_time(job_cost_map, proc_pool): """ Given a map: jobname -> (procs, est-time), return a total time From 61af37b844954f3ab2b31fd4416bf5dd593a00f8 Mon Sep 17 00:00:00 2001 From: jayeshkrishna Date: Tue, 11 Apr 2017 14:44:50 -0500 Subject: [PATCH 029/219] Set rearr opts using pio interface Instead of filling in and passing pio_rearr_opt_t type in pio_init, we now use pio_set_rearr_opts() interface to set the rearranger options. This change decouples the cime shared code from pio by avoiding accessing the internal pio type (pio_rearr_opt_t) to set the rearranger options. This change requires pio1 to support the pio_set_rearr_opts() interface. 
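In outline, each component iosystem now has its rearranger options applied right after pio_init, with the return code checked (the pio_rearr_opt_* variables are the module-level copies of the rearranger settings introduced in this change):

    ret = pio_set_rearr_opts(iosystems(i), pio_rearr_opt_comm_type, &
          pio_rearr_opt_fcd, &
          pio_rearr_opt_c2i_enable_hs, pio_rearr_opt_c2i_enable_isend, &
          pio_rearr_opt_c2i_max_pend_req, &
          pio_rearr_opt_i2c_enable_hs, pio_rearr_opt_i2c_enable_isend, &
          pio_rearr_opt_i2c_max_pend_req)
    if (ret /= PIO_NOERR) then
       write(shr_log_unit,*) "ERROR: Setting rearranger options failed"
    end if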
Fixes #1296 --- src/share/util/shr_pio_mod.F90 | 55 ++++++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/src/share/util/shr_pio_mod.F90 b/src/share/util/shr_pio_mod.F90 index 2e02ed83c523..2bc81019c64c 100644 --- a/src/share/util/shr_pio_mod.F90 +++ b/src/share/util/shr_pio_mod.F90 @@ -52,7 +52,11 @@ module shr_pio_mod integer, allocatable :: io_compid(:) integer :: pio_debug_level=0, pio_blocksize=0 integer(kind=pio_offset_kind) :: pio_buffer_size_limit=-1 - type(pio_rearr_opt_t) :: pio_rearr_opts + integer :: pio_rearr_opt_comm_type, pio_rearr_opt_fcd + logical :: pio_rearr_opt_c2i_enable_hs, pio_rearr_opt_c2i_enable_isend + integer :: pio_rearr_opt_c2i_max_pend_req + logical :: pio_rearr_opt_i2c_enable_hs, pio_rearr_opt_i2c_enable_isend + integer :: pio_rearr_opt_i2c_max_pend_req integer :: total_comps=0 #define DEBUGI 1 @@ -144,6 +148,7 @@ subroutine shr_pio_init2(comp_id, comp_name, comp_iamin, comp_comm, comp_comm_ia integer :: ncomps character(len=shr_kind_cl) :: nlfilename, cname type(iosystem_desc_t) :: iosys + integer :: ret character(*), parameter :: subName = '(shr_pio_init2) ' if(pio_debug_level>0) then @@ -177,7 +182,18 @@ subroutine shr_pio_init2(comp_id, comp_name, comp_iamin, comp_comm, comp_comm_ia if(pio_async_interface) then #ifdef PIO1 - call pio_init(total_comps,mpi_comm_world, comp_comm, io_comm, iosystems, rearr_opts=pio_rearr_opts) + call pio_init(total_comps,mpi_comm_world, comp_comm, io_comm, iosystems) + do i=1,total_comps + ret = pio_set_rearr_opts(iosystems(i), pio_rearr_opt_comm_type,& + pio_rearr_opt_fcd,& + pio_rearr_opt_c2i_enable_hs, pio_rearr_opt_c2i_enable_isend,& + pio_rearr_opt_c2i_max_pend_req,& + pio_rearr_opt_i2c_enable_hs, pio_rearr_opt_i2c_enable_isend,& + pio_rearr_opt_i2c_max_pend_req) + if(ret /= PIO_NOERR) then + write(shr_log_unit,*) "ERROR: Setting rearranger options failed" + end if + end do #else call pio_init(total_comps,mpi_comm_world, comp_comm, io_comm, iosystems) @@ -199,7 +215,16 @@ subroutine shr_pio_init2(comp_id, comp_name, comp_iamin, comp_comm, comp_comm_ia call pio_init(comp_comm_iam(i), comp_comm(i), pio_comp_settings(i)%pio_numiotasks, 0, & pio_comp_settings(i)%pio_stride, & pio_comp_settings(i)%pio_rearranger, iosystems(i), & - base=pio_comp_settings(i)%pio_root, rearr_opts=pio_rearr_opts) + base=pio_comp_settings(i)%pio_root) + ret = pio_set_rearr_opts(iosystems(i), pio_rearr_opt_comm_type,& + pio_rearr_opt_fcd,& + pio_rearr_opt_c2i_enable_hs, pio_rearr_opt_c2i_enable_isend,& + pio_rearr_opt_c2i_max_pend_req,& + pio_rearr_opt_i2c_enable_hs, pio_rearr_opt_i2c_enable_isend,& + pio_rearr_opt_i2c_max_pend_req) + if(ret /= PIO_NOERR) then + write(shr_log_unit,*) "ERROR: Setting rearranger options failed" + end if if(comp_comm_iam(i)==0) then write(shr_log_unit,*) io_compname(i),' : pio_numiotasks = ',pio_comp_settings(i)%pio_numiotasks write(shr_log_unit,*) io_compname(i),' : pio_stride = ',pio_comp_settings(i)%pio_stride @@ -782,29 +807,29 @@ subroutine shr_pio_rearr_opts_set(comm, pio_rearr_comm_type, pio_rearr_comm_fcd, ! buf(6) = max_pend_req_io2comp ! buf(7) = enable_hs_io2comp ! buf(8) = enable_isend_io2comp - pio_rearr_opts%comm_type = buf(1) - pio_rearr_opts%fcd = buf(2) - pio_rearr_opts%comm_fc_opts_comp2io%max_pend_req = buf(3) + pio_rearr_opt_comm_type = buf(1) + pio_rearr_opt_fcd = buf(2) + pio_rearr_opt_c2i_max_pend_req = buf(3) if(buf(4) == 0) then - pio_rearr_opts%comm_fc_opts_comp2io%enable_hs = .false. + pio_rearr_opt_c2i_enable_hs = .false. 
else - pio_rearr_opts%comm_fc_opts_comp2io%enable_hs = .true. + pio_rearr_opt_c2i_enable_hs = .true. end if if(buf(5) == 0) then - pio_rearr_opts%comm_fc_opts_comp2io%enable_isend = .false. + pio_rearr_opt_c2i_enable_isend = .false. else - pio_rearr_opts%comm_fc_opts_comp2io%enable_isend = .true. + pio_rearr_opt_c2i_enable_isend = .true. end if - pio_rearr_opts%comm_fc_opts_io2comp%max_pend_req = buf(6) + pio_rearr_opt_i2c_max_pend_req = buf(6) if(buf(7) == 0) then - pio_rearr_opts%comm_fc_opts_io2comp%enable_hs = .false. + pio_rearr_opt_i2c_enable_hs = .false. else - pio_rearr_opts%comm_fc_opts_io2comp%enable_hs = .true. + pio_rearr_opt_i2c_enable_hs = .true. end if if(buf(8) == 0) then - pio_rearr_opts%comm_fc_opts_io2comp%enable_isend = .false. + pio_rearr_opt_i2c_enable_isend = .false. else - pio_rearr_opts%comm_fc_opts_io2comp%enable_isend = .true. + pio_rearr_opt_i2c_enable_isend = .true. end if end subroutine !=============================================================================== From 8b2a76d8c31df963637d96496261042a07a4fcf1 Mon Sep 17 00:00:00 2001 From: jayeshkrishna Date: Fri, 14 Apr 2017 15:26:29 -0500 Subject: [PATCH 030/219] Remove conditional pio1 code We no longer pass rearranger options via pio_init(). We have only implemented (for now) passing rearranger options via pio_init() for non-async cases, but since we no longer pass rearranger opts via pio_init (and pio_set_rearr_opts() works for both async and non-async cases) we can get rid of the conditional ("#ifdef PIO1") code. --- src/share/util/shr_pio_mod.F90 | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/share/util/shr_pio_mod.F90 b/src/share/util/shr_pio_mod.F90 index 2bc81019c64c..2c11bb9b98e1 100644 --- a/src/share/util/shr_pio_mod.F90 +++ b/src/share/util/shr_pio_mod.F90 @@ -181,7 +181,6 @@ subroutine shr_pio_init2(comp_id, comp_name, comp_iamin, comp_comm, comp_comm_ia allocate(iosystems(total_comps)) if(pio_async_interface) then -#ifdef PIO1 call pio_init(total_comps,mpi_comm_world, comp_comm, io_comm, iosystems) do i=1,total_comps ret = pio_set_rearr_opts(iosystems(i), pio_rearr_opt_comm_type,& @@ -194,10 +193,6 @@ subroutine shr_pio_init2(comp_id, comp_name, comp_iamin, comp_comm, comp_comm_ia write(shr_log_unit,*) "ERROR: Setting rearranger options failed" end if end do - -#else - call pio_init(total_comps,mpi_comm_world, comp_comm, io_comm, iosystems) -#endif i=1 else do i=1,total_comps From 50658485842e1e27c098029890fcfc926203a2ba Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 14 Apr 2017 15:24:18 -0600 Subject: [PATCH 031/219] Rename to preview_run --- scripts/Tools/{get_key_commands => preview_run} | 0 scripts/lib/CIME/case.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename scripts/Tools/{get_key_commands => preview_run} (100%) diff --git a/scripts/Tools/get_key_commands b/scripts/Tools/preview_run similarity index 100% rename from scripts/Tools/get_key_commands rename to scripts/Tools/preview_run diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 1426f5a2a1b3..0d9b39a3ddb7 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -862,7 +862,7 @@ def _create_caseroot_tools(self): os.path.join(toolsdir, "lt_archive.sh"), os.path.join(toolsdir, "getTiming"), os.path.join(toolsdir, "save_provenance"), - os.path.join(toolsdir, "get_key_commands"), + os.path.join(toolsdir, "preview_run"), os.path.join(machines_dir,"Makefile"), os.path.join(machines_dir,"mkSrcfiles"), os.path.join(machines_dir,"mkDepends")] From 
e790212047c52a3ef1db1010d86345d0a3c9adbf Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 14 Apr 2017 16:10:46 -0600 Subject: [PATCH 032/219] Add mpirun and batch overrides --- scripts/lib/CIME/XML/env_batch.py | 6 ++- scripts/lib/CIME/case.py | 41 +++++++++++-------- .../mct/cime_config/config_component.xml | 11 ++++- 3 files changed, 37 insertions(+), 21 deletions(-) diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index 96e78a5df643..330e2382a222 100644 --- a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -363,7 +363,6 @@ def _submit_single_job(self, case, job, depid=None, no_batch=False, batch_args=N logger.info("Starting job script %s" % job) - # This is what we want longterm function_name = job.replace(".", "_") if not dry_run: function_name = job.replace(".", "_") @@ -372,6 +371,9 @@ def _submit_single_job(self, case, job, depid=None, no_batch=False, batch_args=N return submitargs = self.get_submit_args(case, job) + args_override = self.get_value("BATCH_COMMAND", subgroup=job) + if args_override: + submitargs = args_override if depid is not None: dep_string = self.get_value("depend_string", subgroup=None) @@ -455,7 +457,7 @@ def get_all_queues(self): def get_nodes(self, nodename, attributes=None, root=None, xpath=None): if nodename in ("JOB_WALLCLOCK_TIME", "PROJECT", "PROJECT_REQUIRED", - "JOB_QUEUE"): + "JOB_QUEUE", "BATCH_COMMAND"): nodes = EnvBase.get_nodes(self, "entry", attributes={"id":nodename}, root=root, xpath=xpath) else: diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 0d9b39a3ddb7..9b8000a6a1dd 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -1074,28 +1074,33 @@ def get_mpirun_cmd(self, job="case.run"): run_misc_suffix = "" if run_misc_suffix is None else run_misc_suffix run_suffix = run_exe + run_misc_suffix - # Things that will have to be matched against mpirun element attributes - mpi_attribs = { - "compiler" : self.get_value("COMPILER"), - "mpilib" : self.get_value("MPILIB"), - "threaded" : self.get_build_threaded(), - "unit_testing" : False - } - - executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) + mpirun_cmd_override = self.get_value("MPI_RUN_COMMAND") - # special case for aprun - if executable == "aprun": - aprun_cmd, num_nodes = get_aprun_cmd_for_case(self, run_exe) - expect(num_nodes == self.num_nodes, "Not using optimized num nodes") - return aprun_cmd + " " + run_misc_suffix + if mpirun_cmd_override not in ["", None, "UNSET"]: + return mpirun_cmd_override + " " + run_exe + " " + run_misc_suffix else: - mpi_arg_string = " ".join(args.values()) + # Things that will have to be matched against mpirun element attributes + mpi_attribs = { + "compiler" : self.get_value("COMPILER"), + "mpilib" : self.get_value("MPILIB"), + "threaded" : self.get_build_threaded(), + "unit_testing" : False + } + + executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) + + # special case for aprun + if executable == "aprun": + aprun_cmd, num_nodes = get_aprun_cmd_for_case(self, run_exe) + expect(num_nodes == self.num_nodes, "Not using optimized num nodes") + return aprun_cmd + " " + run_misc_suffix + else: + mpi_arg_string = " ".join(args.values()) - if self.get_value("BATCH_SYSTEM") == "cobalt": - mpi_arg_string += " : " + if self.get_value("BATCH_SYSTEM") == "cobalt": + mpi_arg_string += " : " - return "%s %s %s" % (executable if executable is not None else "", mpi_arg_string, run_suffix) + return "%s %s %s" % (executable if 
executable is not None else "", mpi_arg_string, run_suffix) def set_model_version(self, model): version = "unknown" diff --git a/src/drivers/mct/cime_config/config_component.xml b/src/drivers/mct/cime_config/config_component.xml index 958edf5759c9..186fe6e85a17 100644 --- a/src/drivers/mct/cime_config/config_component.xml +++ b/src/drivers/mct/cime_config/config_component.xml @@ -1899,7 +1899,7 @@ UNSET run_mpi env_run.xml - mpi run command + override the mpi run command, do not include model executable @@ -2602,6 +2602,15 @@ The machine wallclock setting. Default determined in config_machines.xml can be overwritten by testing + + char + + + job_submission + env_batch.xml + Override the batch submit command this job. Do not include executable or dependencies + + char From 7948d497a0b480ed6b9830517106da394f8b7a8f Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 14 Apr 2017 16:37:12 -0600 Subject: [PATCH 033/219] Improve error reporting --- scripts/lib/CIME/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 9216c85ff142..e33db5f80d07 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -259,7 +259,7 @@ def run_cmd_no_fail(cmd, input_str=None, from_dir=None, verbose=None, if stat != 0: # If command produced no errput, put output in the exception since we # have nothing else to go on. - errput = output if errput == "" else errput + errput = output if not errput else errput expect(False, "Command: '%s' failed with error '%s'%s" % (cmd, errput, "" if from_dir is None else " from dir '%s'" % from_dir)) @@ -1172,7 +1172,7 @@ def run_and_log_case_status(func, phase, caseroot='.'): try: rv = func() except: - e = sys.exc_info()[0] + e = sys.exc_info()[1] append_case_status(phase, "error", msg=("\n%s" % e), caseroot=caseroot) raise else: From e7319b0376f7bdef629429bae1db7b49408223e3 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 14 Apr 2017 16:44:15 -0600 Subject: [PATCH 034/219] Update documentation --- scripts/Tools/preview_run | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/Tools/preview_run b/scripts/Tools/preview_run index 32e67539846b..1cc94320e20a 100755 --- a/scripts/Tools/preview_run +++ b/scripts/Tools/preview_run @@ -4,10 +4,10 @@ Script to query key CIME shell commands (mpirun and batch submission). 
To force a certain mpirun command, use: -./xmlchange MPIRUN_OVERRIDE $your_cmd +./xmlchange MPI_RUN_COMMAND $your_cmd To force a certain qsub command, use: -./xmlchange SUBMIT_OVERRIDE $your_cmd +./xmlchange --subgroup=case.run BATCH_COMMAND $your_cmd """ from standard_script_setup import * From 314a93cfd85be3ae9b0996404396ca03931bb899 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Sun, 16 Apr 2017 13:18:43 -0600 Subject: [PATCH 035/219] Make Machines.get_value more likely to return values of the correct type --- scripts/create_test | 2 +- scripts/lib/CIME/XML/machines.py | 3 ++ scripts/lib/CIME/test_scheduler.py | 2 +- scripts/lib/CIME/utils.py | 34 +++++++++++++++++++++-- scripts/tests/scripts_regression_tests.py | 3 ++ 5 files changed, 40 insertions(+), 4 deletions(-) diff --git a/scripts/create_test b/scripts/create_test index f1dcede9b6e2..3ed88253cceb 100755 --- a/scripts/create_test +++ b/scripts/create_test @@ -412,7 +412,7 @@ def single_submit_impl(machine_name, test_id, proc_pool, project, args, job_cost submit_cmd = env_batch.get_value("batch_submit", subgroup=None) submit_args = env_batch.get_submit_args(case, "case.run") - tasks_per_node = int(mach.get_value("PES_PER_NODE")) + tasks_per_node = mach.get_value("PES_PER_NODE") num_nodes = int(math.ceil(float(proc_pool) / tasks_per_node)) if wall_time is None: wall_time = compute_total_time(job_cost_map, proc_pool) diff --git a/scripts/lib/CIME/XML/machines.py b/scripts/lib/CIME/XML/machines.py index cbe458e2afeb..5806045608ad 100644 --- a/scripts/lib/CIME/XML/machines.py +++ b/scripts/lib/CIME/XML/machines.py @@ -4,6 +4,7 @@ from CIME.XML.standard_module_setup import * from CIME.XML.generic_xml import GenericXML from CIME.XML.files import Files +from CIME.utils import convert_to_unknown_type import socket @@ -191,6 +192,8 @@ def get_value(self, name, attributes=None, resolved=True, subgroup=None): elif name in os.environ: value = os.environ[name] + value = convert_to_unknown_type(value) + return value def get_field_from_list(self, listname, reqval=None, attributes=None): diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index 315364b78a34..0483dbda2632 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -150,7 +150,7 @@ def __init__(self, test_names, test_data=None, if parallel_jobs is None: self._parallel_jobs = min(len(test_names), - int(self._machobj.get_value("MAX_TASKS_PER_NODE"))) + self._machobj.get_value("MAX_TASKS_PER_NODE")) else: self._parallel_jobs = parallel_jobs diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 9216c85ff142..ef420e7560d4 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -714,9 +714,9 @@ def convert_to_type(value, type_str, vid=""): expect(False, "Entry %s was listed as type int but value '%s' is not valid int" % (vid, value)) elif type_str == "logical": - expect(value in ["TRUE", "FALSE","true","false"], + expect(value.upper() in ["TRUE", "FALSE"], "Entry %s was listed as type logical but had val '%s' instead of TRUE or FALSE" % (vid, value)) - value = value == "TRUE" or value == "true" + value = value.upper() == "TRUE" elif type_str == "real": try: @@ -729,6 +729,36 @@ def convert_to_type(value, type_str, vid=""): return value +def convert_to_unknown_type(value): + """ + Convert value to it's real type by probing conversions. 
+ """ + if value is not None: + + # Attempt to convert to logical + if value.upper() in ["TRUE", "FALSE"]: + return value.upper() == "TRUE" + + # Attempt to convert to integer + try: + value = int(eval(value)) + except: + pass + else: + return value + + # Attempt to convert to float + try: + value = float(value) + except: + pass + else: + return value + + # Just treat as string + + return value + def convert_to_string(value, type_str=None, vid=""): """ Convert value back to string. diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 762bf7bda702..d084e1dd6120 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -1268,6 +1268,9 @@ def test_cime_case(self): run_cmd_assert_result(self, "%s/create_test TESTRUNPASS_P1.f19_g16_rx1.A -t %s --no-build --test-root %s --output-root %s" % (SCRIPT_DIR, self._baseline_name, TEST_ROOT, TEST_ROOT)) + self.assertEqual(type(MACHINE.get_value("MAX_TASKS_PER_NODE")), int) + self.assertTrue(type(MACHINE.get_value("PROJECT_REQUIRED")) in [type(None) , bool]) + casedir = os.path.join(self._testroot, "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) From cb1ffe8815920f7b2a42945954d4f1773e9a02c7 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Sun, 16 Apr 2017 14:11:50 -0600 Subject: [PATCH 036/219] Inform user of machine Promote to warning if machine doesn't match probed machine --- scripts/lib/CIME/case.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index ca727570fa78..cf5410a5e720 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -585,8 +585,14 @@ def configure(self, compset_name, grid_name, machine_name=None, #-------------------------------------------- # set machine values in env_xxx files machobj = Machines(machine=machine_name) + probed_machine = machobj.probe_machine_name() machine_name = machobj.get_machine_name() - self.set_value("MACH",machine_name) + self.set_value("MACH", machine_name) + if probed_machine != machine_name and probed_machine is not None: + logger.warning("WARNING: User-selected machine '%s' does not match probed machine '%s'" % (machine_name, probed_machine)) + else: + logger.info("Machine is %s" % machine_name) + nodenames = machobj.get_node_names() nodenames = [x for x in nodenames if '_system' not in x and '_variables' not in x and 'mpirun' not in x and\ From ad0a5c1ebdcbf4bbf43cc7a941af27e70f832052 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Sun, 16 Apr 2017 16:10:51 -0600 Subject: [PATCH 037/219] Big cs.status upgrade 1) Can run from any directory 2) Pass through arguments from test-specific cs.status to general cs.status 3) Support for multiple test-ids 4) Add --summary option --- scripts/Tools/cs.status | 83 ++++++++++++++++++++++++++++++ scripts/Tools/cs_status | 60 --------------------- scripts/lib/CIME/test_scheduler.py | 3 +- scripts/lib/CIME/test_status.py | 12 +++-- scripts/lib/cs.status.template | 2 +- 5 files changed, 93 insertions(+), 67 deletions(-) create mode 100755 scripts/Tools/cs.status delete mode 100755 scripts/Tools/cs_status diff --git a/scripts/Tools/cs.status b/scripts/Tools/cs.status new file mode 100755 index 000000000000..8250a3c09684 --- /dev/null +++ b/scripts/Tools/cs.status @@ -0,0 +1,83 @@ +#!/usr/bin/env python +""" 
+List test results based on TestStatus files. Returns True if +no errors occured (not based on test statuses). +""" + +from standard_script_setup import * +import argparse, sys, os, logging, glob +from CIME.test_status import * + +############################################################################### +def parse_command_line(args, description): +############################################################################### + parser = argparse.ArgumentParser( +usage="""\n%s [ ...] [--verbose] +OR +%s --help +OR +%s --test + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Wait for all tests in a test area\033[0m + > %s path/to/testarea/*/TestStatus +""" % ((os.path.basename(args[0]), ) * 4), + +description=description, + +formatter_class=argparse.ArgumentDefaultsHelpFormatter +) + + parser.add_argument("paths", nargs="*", help="Paths to TestStatus files.") + + parser.add_argument("-s", "--summary", action="store_true", + help="Only show summary") + + parser.add_argument("-t", "--test-id", action="append", default=[], + help="Only show summary") + + parser.add_argument("-r", "--test-root", default=os.getcwd(), + help="Only show summary") + + args = parser.parse_args(args[1:]) + + return args.paths, args.summary, args.test_id, args.test_root + +############################################################################### +def cs_status(test_paths, summary=False): +############################################################################### + test_id_output = {} + for test_path in test_paths: + test_dir=os.path.dirname(test_path) + ts = TestStatus(test_dir=test_dir) + test_id = os.path.basename(test_dir).split(".")[-1] + test_name = ts.get_name() + status = ts.get_overall_test_status() + if not summary: + output = " %s (Overall: %s) details:\n" % (test_name, status) + output += ts.phase_statuses_dump(prefix=" ") + else: + output = " %s %s\n" % (status, test_name) + + if test_id in test_id_output: + test_id_output[test_id] += output + else: + test_id_output[test_id] = output + + for test_id in sorted(test_id_output): + print test_id + print test_id_output[test_id], + +############################################################################### +def _main_func(description): +############################################################################### + test_paths, summary, test_ids, test_root = parse_command_line(sys.argv, description) + for test_id in test_ids: + test_paths.extend(glob.glob(os.path.join(test_root, "*%s/TestStatus" % test_id))) + + cs_status(test_paths, summary) + +############################################################################### + +if (__name__ == "__main__"): + _main_func(__doc__) diff --git a/scripts/Tools/cs_status b/scripts/Tools/cs_status deleted file mode 100755 index fd521f7dcbf0..000000000000 --- a/scripts/Tools/cs_status +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python -""" -List test results based on TestStatus files. Returns True if -no errors occured (not based on test statuses). -""" - -from standard_script_setup import * -import argparse, sys, os, logging -from CIME.test_status import * - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( -usage="""\n%s [ ...] 
[--verbose] -OR -%s --help -OR -%s --test - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Wait for all tests in a test area\033[0m - > %s path/to/testarea/*/TestStatus -""" % ((os.path.basename(args[0]), ) * 4), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("paths", nargs="+", help="Paths to TestStatus files.") - - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) - - return args.paths - -############################################################################### -def cs_status(test_paths): -############################################################################### - for test_path in test_paths: - ts = TestStatus(test_dir=os.path.dirname(test_path)) - test_name = ts.get_name() - summary = ts.get_overall_test_status() - print "%s (Overall: %s), details:" % (test_name, summary) - ts.phase_statuses_dump(sys.stdout, prefix=' ') - -############################################################################### -def _main_func(description): -############################################################################### - test_paths = parse_command_line(sys.argv, description) - cs_status(test_paths) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index 315364b78a34..715327f7e0b5 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -745,7 +745,8 @@ def _setup_cs_files(self): template = open(template_file, "r").read() template = template.replace("", os.path.join(self._cime_root,"scripts","Tools")).replace\ - ("", self._test_id) + ("", self._test_id).replace\ + ("", self._test_root) if not os.path.exists(self._test_root): os.makedirs(self._test_root) cs_status_file = os.path.join(self._test_root, "cs.status.%s" % self._test_id) diff --git a/scripts/lib/CIME/test_status.py b/scripts/lib/CIME/test_status.py index 964b0dfd1b9f..97442a43a059 100644 --- a/scripts/lib/CIME/test_status.py +++ b/scripts/lib/CIME/test_status.py @@ -207,24 +207,26 @@ def get_status(self, phase): def get_comment(self, phase): return self._phase_statuses[phase][1] if phase in self._phase_statuses else None - def phase_statuses_dump(self, fd, prefix=''): + def phase_statuses_dump(self, prefix=''): """ Args: - fd: file open for writing prefix: string printed at the start of each line """ + result = "" if self._phase_statuses: for phase, data in self._phase_statuses.iteritems(): status, comments = data if not comments: - fd.write("%s%s %s %s\n" % (prefix, status, self._test_name, phase)) + result += "%s%s %s %s\n" % (prefix, status, self._test_name, phase) else: - fd.write("%s%s %s %s %s\n" % (prefix, status, self._test_name, phase, comments)) + result += "%s%s %s %s %s\n" % (prefix, status, self._test_name, phase, comments) + + return result def flush(self): if self._phase_statuses and not self._no_io: with open(self._filename, "w") as fd: - self.phase_statuses_dump(fd) + fd.write(self.phase_statuses_dump()) def _parse_test_status(self, file_contents): """ diff --git a/scripts/lib/cs.status.template b/scripts/lib/cs.status.template index f774bcc03e19..25f7f0fb2e80 100644 --- a/scripts/lib/cs.status.template +++ b/scripts/lib/cs.status.template @@ -1,3 +1,3 @@ #! 
/bin/bash -/cs_status *./TestStatus +/cs.status "$@" /*./TestStatus From 562cf4b622e935d9be13782f9eac8583dd223ba5 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 13 Apr 2017 13:50:56 -0600 Subject: [PATCH 038/219] add support for user_mods in compset definition --- config/xml_schemas/config_compsets.xsd | 2 ++ scripts/lib/CIME/XML/compsets.py | 4 +++- scripts/lib/CIME/case.py | 30 +++++++++++++++++--------- 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/config/xml_schemas/config_compsets.xsd b/config/xml_schemas/config_compsets.xsd index e9072738a6d7..e2b64df2c666 100644 --- a/config/xml_schemas/config_compsets.xsd +++ b/config/xml_schemas/config_compsets.xsd @@ -11,6 +11,7 @@ + @@ -32,6 +33,7 @@ + diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index 875aea5d99a8..db1c4c62d783 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -36,8 +36,10 @@ def get_compset_match(self, name): for node in science_support_nodes: science_support.append(node.get("grid")) + user_mods = self.get_optional_node("user_mods", root=node) + logger.debug("Found node match with alias: %s and lname: %s" % (alias, lname)) - return (lname, alias, science_support) + return (lname, alias, science_support, user_mods) return (None, None, [False]) def get_compset_var_settings(self, compset, grid): diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index ca727570fa78..5a81b0fba128 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -95,7 +95,9 @@ def __init__(self, case_root=None, read_only=True): self._components = [] self._component_classes = [] self._is_env_loaded = False - + # these are user_mods as defined in the compset + # Command Line user_mods are handeled seperately + self._user_mods = None self.thread_count = None self.total_tasks = None self.tasks_per_node = None @@ -419,7 +421,7 @@ def _set_compset_and_pesfile(self, compset_name, files, user_compset=False, pesf # If the file exists, read it and see if there is a match for the compset alias or longname if (os.path.isfile(compsets_filename)): compsets = Compsets(compsets_filename) - match, compset_alias, science_support = compsets.get_compset_match(name=compset_name) + match, compset_alias, science_support, self._user_mods = compsets.get_compset_match(name=compset_name) pesfile = files.get_value("PES_SPEC_FILE" , {"component":component}) if match is not None: self._pesfile = pesfile @@ -952,14 +954,22 @@ def create_caseroot(self, clone=False): self._create_caseroot_tools() def apply_user_mods(self, user_mods_dir=None): - if user_mods_dir is not None: - if os.path.isabs(user_mods_dir): - user_mods_path = user_mods_dir - else: - user_mods_path = self.get_value('USER_MODS_DIR') - user_mods_path = os.path.join(user_mods_path, user_mods_dir) - self.set_value("USER_MODS_FULLPATH",user_mods_path) - apply_user_mods(self._caseroot, user_mods_path) + """ + User mods can be specified on the create_newcase command line (usually when called from create test) + or they can be in the compset definition, or both. 
+ """ + + for user_mods in (user_mods_dir, self._user_mods): + if user_mods is not None: + if os.path.isabs(user_mods): + user_mods_path = user_mods + else: + user_mods_path = self.get_value('USER_MODS_DIR') + user_mods_path = os.path.join(user_mods_path, user_mods) + CIME.user_mod_support.apply_user_mods(self._caseroot, user_mods_path) + + + def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime_output_root=None): if cime_output_root is None: From efb71259f4a6557ebdf56fd6a789e5c11226d888 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 13 Apr 2017 14:18:17 -0600 Subject: [PATCH 039/219] fix pylint issue --- scripts/lib/CIME/case.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 5a81b0fba128..f2aa9825ca39 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -966,7 +966,7 @@ def apply_user_mods(self, user_mods_dir=None): else: user_mods_path = self.get_value('USER_MODS_DIR') user_mods_path = os.path.join(user_mods_path, user_mods) - CIME.user_mod_support.apply_user_mods(self._caseroot, user_mods_path) + apply_user_mods(self._caseroot, user_mods_path) From ea4a759ee3a44b8e99c132549e784b0d1569806b Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 13 Apr 2017 14:30:15 -0600 Subject: [PATCH 040/219] fix no-match args --- scripts/lib/CIME/XML/compsets.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index db1c4c62d783..09048b0e148a 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -27,7 +27,9 @@ def get_compset_match(self, name): nodes = self.get_nodes("compset") alias = None lname = None + science_support = [] + for node in nodes: alias = self.get_element_text("alias",root=node) lname = self.get_element_text("lname",root=node) @@ -40,7 +42,7 @@ def get_compset_match(self, name): logger.debug("Found node match with alias: %s and lname: %s" % (alias, lname)) return (lname, alias, science_support, user_mods) - return (None, None, [False]) + return (None, None, [False], None) def get_compset_var_settings(self, compset, grid): ''' From 685c3febf3a3e6fff6984149fb6a8862020698c5 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Fri, 14 Apr 2017 14:14:12 -0600 Subject: [PATCH 041/219] first changes needed for aquaplanet som --- config/cesm/config_grids.xml | 25 ++++++++++++++++--- config/xml_schemas/config_grids_v2.xsd | 2 +- .../docn/cime_config/config_component.xml | 7 ++++-- .../cime_config/namelist_definition_docn.xml | 4 +++ 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml index 269839e58c7a..8fab255610d3 100644 --- a/config/cesm/config_grids.xml +++ b/config/cesm/config_grids.xml @@ -361,13 +361,20 @@ gx1v7 - + 0.9x1.25 0.9x1.25 0.9x1.25 gx1v6 + + 0.9x1.25 + 0.9x1.25 + 0.9x1.25 + null + + 0.9x1.25 0.9x1.25 @@ -506,6 +513,13 @@ gx1v6 + + 1.9x2.5 + 1.9x2.5 + 1.9x2.5 + null + + 1.9x2.5 1.9x2.5 @@ -959,9 +973,11 @@ 288 192 domain.lnd.fv0.9x1.25_gx1v6.090309.nc - domain.ocn.0.9x1.25_gx1v6_090403.nc + domain.ocn.0.9x1.25_gx1v6_090403.nc domain.lnd.fv0.9x1.25_gx1v7.151020.nc - domain.ocn.fv0.9x1.25_gx1v7.151020.nc + domain.ocn.fv0.9x1.25_gx1v7.151020.nc + /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc + /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc 0.9x1.25 is FV 1-deg grid: @@ -969,6 +985,7 @@ 144 96 domain.lnd.fv1.9x2.5_gx1v6.090206.nc 
domain.ocn.1.9x2.5_gx1v6_090403.nc + domain.aqua.fv1.9x2.5.nc 1.9x2.5 is FV 2-deg grid: @@ -1483,7 +1500,7 @@ cpl/gridmaps/gx1v7/map_gx1v7_TO_ww3a_splice_170214.nc - + cpl/gridmaps/T31/map_T31_TO_ww3a_bilin_131104.nc diff --git a/config/xml_schemas/config_grids_v2.xsd b/config/xml_schemas/config_grids_v2.xsd index 2bac0a1de037..b4680b93f9e3 100644 --- a/config/xml_schemas/config_grids_v2.xsd +++ b/config/xml_schemas/config_grids_v2.xsd @@ -4,7 +4,7 @@ - + diff --git a/src/components/data_comps/docn/cime_config/config_component.xml b/src/components/data_comps/docn/cime_config/config_component.xml index 165d21c1dadb..686ab1bea336 100644 --- a/src/components/data_comps/docn/cime_config/config_component.xml +++ b/src/components/data_comps/docn/cime_config/config_component.xml @@ -15,7 +15,7 @@ char - prescribed,som,copyall,interannual,null + prescribed,pres_aquap,som,som_aquap,copyall,interannual,null prescribed null @@ -24,6 +24,8 @@ us20 interannual copyall + pres_aquap + som_aquap run_component_docn env_run.xml @@ -69,7 +71,8 @@ UNSET - pop_frc.1x1d.090130.nc + pop_frc.1x1d.090130.nc + /glade/u/home/benedict/ys/datain/cesm2_0_beta03.som.forcing/cam4.som.forcing.aquaplanet.QzaFix_h30Fix_TspunFix.fv19_CTL.nc run_component_docn env_run.xml diff --git a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml index 32d2d7f5ba5a..5d249cdbd805 100644 --- a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +++ b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml @@ -47,7 +47,9 @@ List of streams used for the given docn_mode. prescribed + prescribed som + som interannual copyall @@ -312,7 +314,9 @@ NULL SSTDATA + SSTDATA SOM + SOM IAF COPYALL From 9cc7740ca19723a3a7b056f0e63e7abc28450132 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Fri, 14 Apr 2017 20:34:34 -0600 Subject: [PATCH 042/219] Fix precedence of user_mods application Now --user-mods on the command line (including testmods) will take precedence over the user_mods set by the compset - for user_nl files, shell_commands and SourceMods. 
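A minimal sketch of why order of application yields this precedence (toy Python with
made-up names; the real logic lives in user_mod_support.py below): user_nl fragments
are appended, and SourceMods files are overwritten, so whichever user_mods directory
is applied last wins any conflict.

    def apply_user_mods_in_order(layers):
        # Toy model of the precedence rule: append user_nl content, overwrite
        # SourceMods, so later layers shadow earlier ones on conflict.
        user_nl_cpl = ""
        sourcemod = None
        for layer in layers:               # e.g. compset mods first, command-line mods last
            user_nl_cpl += layer + "\n"    # appended: last setting of a variable wins
            sourcemod = layer              # overwritten: last layer wins outright
        return user_nl_cpl, sourcemod

    print(apply_user_mods_in_order(["compset_mods", "command_line_mods"]))
    # ('compset_mods\ncommand_line_mods\n', 'command_line_mods')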
I have tested this with this diff to the A compset diff --git a/src/drivers/mct/cime_config/config_compsets.xml b/src/drivers/mct/cime_config/config_compsets.xml index c11354e..7e6c2c9 100644 --- a/src/drivers/mct/cime_config/config_compsets.xml +++ b/src/drivers/mct/cime_config/config_compsets.xml @@ -40,6 +40,7 @@ A 2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV + /Users/sacks/temporary/user_mods_compset Along with this create_newcase command: ./create_newcase -case test_0414m -compset A -res f45_g37 \ --run-unsupported \ --user-mods-dir /Users/sacks/temporary/user_mods_command_line where the contents of the two relevant user_mods directories are: --- user_mods_compset/shell_commands --- ./xmlchange STOP_N=101 --- user_mods_compset/SourceMods/src.drv/mysrc.F90 --- user_mods_compset --- user_mods_compset/user_nl_cpl --- user_mods_compset --- user_mods_command_line/shell_commands --- ./xmlchange STOP_N=102 --- user_mods_command_line/SourceMods/src.drv/mysrc.F90 --- user_mods_command_line --- user_mods_command_line/user_nl_cpl --- user_mods_command_line The final contents are: --- user_nl_cpl --- user_mods_compset user_mods_command_line --- shell_commands --- ./xmlchange --force STOP_N=102 --- SourceMods/src.drv/mysrc.F90 --- user_mods_command_line And $ ./xmlquery STOP_N STOP_N: 102 thus demonstrating that the user_mods on the command-line takes precedence over the compset's user_mods. --- scripts/lib/CIME/XML/compsets.py | 6 +++- scripts/lib/CIME/case.py | 6 ++-- scripts/lib/CIME/user_mod_support.py | 47 +++++++++++++++------------- 3 files changed, 34 insertions(+), 25 deletions(-) diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index 09048b0e148a..f5b85d8a5d2a 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -38,7 +38,11 @@ def get_compset_match(self, name): for node in science_support_nodes: science_support.append(node.get("grid")) - user_mods = self.get_optional_node("user_mods", root=node) + user_mods_node = self.get_optional_node("user_mods", root=node) + if user_mods_node is not None: + user_mods = user_mods_node.text + else: + user_mods = None logger.debug("Found node match with alias: %s and lname: %s" % (alias, lname)) return (lname, alias, science_support, user_mods) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index f2aa9825ca39..542be4948f34 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -958,8 +958,10 @@ def apply_user_mods(self, user_mods_dir=None): User mods can be specified on the create_newcase command line (usually when called from create test) or they can be in the compset definition, or both. """ - - for user_mods in (user_mods_dir, self._user_mods): + + # This looping order will lead to the specified user_mods_dir taking + # precedence over self._user_mods, if there are any conflicts. 
+ for user_mods in (self._user_mods, user_mods_dir): if user_mods is not None: if os.path.isabs(user_mods): user_mods_path = user_mods diff --git a/scripts/lib/CIME/user_mod_support.py b/scripts/lib/CIME/user_mod_support.py index 572910e43b22..d27c7255542d 100644 --- a/scripts/lib/CIME/user_mod_support.py +++ b/scripts/lib/CIME/user_mod_support.py @@ -14,6 +14,9 @@ def apply_user_mods(caseroot, user_mods_path): updating SourceMods and creating case shell_commands and xmlchange_cmds files First remove case shell_commands files if any already exist + + If this function is called multiple times, settings from later calls will + take precedence over earlier calls, if there are conflicts. ''' case_shell_command_files = [os.path.join(caseroot,"shell_commands"), os.path.join(caseroot,"xmlchange_cmnds")] @@ -22,6 +25,13 @@ def apply_user_mods(caseroot, user_mods_path): os.remove(shell_command_file) include_dirs = build_include_dirs_list(user_mods_path) + # If a user_mods dir 'foo' includes 'bar', the include_dirs list returned + # from build_include_dirs has 'foo' before 'bar'. But with the below code, + # directories that occur later in the list take precedence over the earlier + # ones, and we want 'foo' to take precedence over 'bar' in this case (in + # general: we want a given user_mods directory to take precedence over any + # mods that it includes). So we reverse include_dirs to accomplish this. + include_dirs.reverse() logger.debug("include_dirs are %s"%include_dirs) for include_dir in include_dirs: # write user_nl_xxx file in caseroot @@ -31,7 +41,11 @@ def apply_user_mods(caseroot, user_mods_path): if len(newcontents) == 0: continue case_user_nl = user_nl.replace(include_dir, caseroot) - update_user_nl_file(case_user_nl, newcontents) + # If the same variable is set twice in a user_nl file, the later one + # takes precedence. So by appending the new contents, later entries + # in the include_dirs list take precedence over earlier entries. + with open(case_user_nl, "a") as fd: + fd.write(newcontents) # update SourceMods in caseroot for root, _, files in os.walk(include_dir,followlinks=True,topdown=False): @@ -39,21 +53,18 @@ def apply_user_mods(caseroot, user_mods_path): for sfile in files: source_mods = os.path.join(root,sfile) case_source_mods = source_mods.replace(include_dir, caseroot) + # We overwrite any existing SourceMods file so that later + # include_dirs take precedence over earlier ones if os.path.isfile(case_source_mods): - logger.warn("Refusing to overwrite existing SourceMods in %s"%case_source_mods) + logger.warn("WARNING: Overwriting existing SourceMods in %s"%case_source_mods) else: logger.info("Adding SourceMod to case %s"%case_source_mods) - try: - shutil.copyfile(source_mods, case_source_mods) - except: - expect(False, "Could not write file %s in caseroot %s" - %(case_source_mods,caseroot)) + try: + shutil.copyfile(source_mods, case_source_mods) + except: + expect(False, "Could not write file %s in caseroot %s" + %(case_source_mods,caseroot)) - # Reverse include_dirs to make sure xmlchange commands are called in the - # correct order; it may be desireable to reverse include_dirs above the - # previous loop and then append user_nl changes rather than prepend them. 
- include_dirs.reverse() - for include_dir in include_dirs: # create xmlchange_cmnds and shell_commands in caseroot shell_command_files = glob.glob(os.path.join(include_dir,"shell_commands")) +\ glob.glob(os.path.join(include_dir,"xmlchange_cmnds")) @@ -70,6 +81,8 @@ def apply_user_mods(caseroot, user_mods_path): shell_commands_file) with open(shell_commands_file,"r") as fd: new_shell_commands = fd.read().replace("xmlchange","xmlchange --force") + # By appending the new commands to the end, settings from later + # include_dirs take precedence over earlier ones with open(case_shell_commands, "a") as fd: fd.write(new_shell_commands) @@ -78,16 +91,6 @@ def apply_user_mods(caseroot, user_mods_path): os.chmod(shell_command_file, 0777) run_cmd_no_fail(shell_command_file) -def update_user_nl_file(case_user_nl, contents): - if os.path.isfile(case_user_nl): - with open(case_user_nl, "r") as fd: - old_contents = fd.read() - contents = contents + old_contents - logger.debug("Pre-pending file %s"%(case_user_nl)) - with open(case_user_nl, "w") as fd: - fd.write(contents) - - def build_include_dirs_list(user_mods_path, include_dirs=None): ''' From 866493330903ac1311cefc62752a73f6911f7fc9 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sat, 15 Apr 2017 11:04:56 -0600 Subject: [PATCH 043/219] fixed issue with using new user_mods element in compset definition --- scripts/lib/CIME/XML/compsets.py | 2 -- scripts/lib/CIME/case.py | 9 +++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index f5b85d8a5d2a..2d96aaed3e68 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -37,13 +37,11 @@ def get_compset_match(self, name): science_support_nodes = self.get_nodes("science_support", root=node) for node in science_support_nodes: science_support.append(node.get("grid")) - user_mods_node = self.get_optional_node("user_mods", root=node) if user_mods_node is not None: user_mods = user_mods_node.text else: user_mods = None - logger.debug("Found node match with alias: %s and lname: %s" % (alias, lname)) return (lname, alias, science_support, user_mods) return (None, None, [False], None) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 542be4948f34..ae878745eb92 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -953,12 +953,16 @@ def create_caseroot(self, clone=False): self._create_caseroot_sourcemods() self._create_caseroot_tools() + # Apply user_mods if part of compset + if self._user_mods is not None: + self._user_mods = self.get_resolved_value(self._user_mods) + self.apply_user_mods() + def apply_user_mods(self, user_mods_dir=None): """ User mods can be specified on the create_newcase command line (usually when called from create test) or they can be in the compset definition, or both. """ - # This looping order will lead to the specified user_mods_dir taking # precedence over self._user_mods, if there are any conflicts. 
for user_mods in (self._user_mods, user_mods_dir): @@ -970,9 +974,6 @@ def apply_user_mods(self, user_mods_dir=None): user_mods_path = os.path.join(user_mods_path, user_mods) apply_user_mods(self._caseroot, user_mods_path) - - - def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime_output_root=None): if cime_output_root is None: cime_output_root = self.get_value("CIME_OUTPUT_ROOT") From 01ad74425c514da65ac6e6f741dd215de27166ac Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Sun, 16 Apr 2017 06:12:03 -0600 Subject: [PATCH 044/219] Add unit tests for user_mod_support --- .../lib/CIME/tests/test_user_mod_support.py | 171 ++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100644 scripts/lib/CIME/tests/test_user_mod_support.py diff --git a/scripts/lib/CIME/tests/test_user_mod_support.py b/scripts/lib/CIME/tests/test_user_mod_support.py new file mode 100644 index 000000000000..c8e582f01b17 --- /dev/null +++ b/scripts/lib/CIME/tests/test_user_mod_support.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +import unittest +import shutil +import tempfile +import os +from CIME.user_mod_support import apply_user_mods + +# ======================================================================== +# Define some parameters +# ======================================================================== + +_SOURCEMODS = os.path.join("SourceMods", "src.drv") + +class TestUserModSupport(unittest.TestCase): + + # ======================================================================== + # Test helper functions + # ======================================================================== + + def setUp(self): + self._caseroot = tempfile.mkdtemp() + self._caseroot_sourcemods = os.path.join(self._caseroot, _SOURCEMODS) + os.makedirs(self._caseroot_sourcemods) + self._user_mods_parent_dir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self._caseroot, ignore_errors=True) + shutil.rmtree(self._user_mods_parent_dir, ignore_errors=True) + + def createUserMod(self, name, include_dirs=None): + """Create a user_mods directory with the given name. + + This directory is created within self._user_mods_parent_dir + + For name='foo', it will contain: + + - A user_nl_cpl file with contents: + foo + + - A shell_commands file with contents: + echo foo >> /PATH/TO/CASEROOT/shell_commands_result + + - A file in _SOURCEMODS named myfile.F90 with contents: + foo + + If include_dirs is given, it should be a list of strings, giving names + of other user_mods directories to include. e.g., if include_dirs is + ['foo1', 'foo2'], then this will create a file 'include_user_mods' that + contains paths to the 'foo1' and 'foo2' user_mods directories, one per + line. 
+ """ + + mod_dir = os.path.join(self._user_mods_parent_dir, name) + os.makedirs(mod_dir) + mod_dir_sourcemods = os.path.join(mod_dir, _SOURCEMODS) + os.makedirs(mod_dir_sourcemods) + + with open(os.path.join(mod_dir, "user_nl_cpl"), "w") as user_nl_cpl: + user_nl_cpl.write(name + "\n") + with open(os.path.join(mod_dir, "shell_commands"), "w") as shell_commands: + command = "echo %s >> %s/shell_commands_result\n"%(name, + self._caseroot) + shell_commands.write(command) + with open(os.path.join(mod_dir_sourcemods, "myfile.F90"), "w") as f90_file: + f90_file.write(name + "\n") + + if include_dirs: + with open(os.path.join(mod_dir, "include_user_mods"), "w") as include_user_mods: + for one_include in include_dirs: + include_user_mods.write(os.path.join(self._user_mods_parent_dir, one_include) + "\n") + + def assertResults(self, expected_user_nl_cpl, + expected_shell_commands_result, + expected_sourcemod, + msg = ""): + """Asserts that the contents of the files in self._caseroot match expectations + + If msg is provided, it is printed for some failing assertions + """ + + path_to_user_nl_cpl = os.path.join(self._caseroot, "user_nl_cpl") + self.assertTrue(os.path.isfile(path_to_user_nl_cpl), + msg = msg + ": user_nl_cpl does not exist") + with open(path_to_user_nl_cpl, "r") as user_nl_cpl: + contents = user_nl_cpl.read() + self.assertEqual(expected_user_nl_cpl, contents) + + path_to_shell_commands_result = os.path.join(self._caseroot, "shell_commands_result") + self.assertTrue(os.path.isfile(path_to_shell_commands_result), + msg = msg + ": shell_commands_result does not exist") + with open(path_to_shell_commands_result, "r") as shell_commands_result: + contents = shell_commands_result.read() + self.assertEqual(expected_shell_commands_result, contents) + + path_to_sourcemod = os.path.join(self._caseroot_sourcemods, "myfile.F90") + self.assertTrue(os.path.isfile(path_to_sourcemod), + msg = msg + ": sourcemod file does not exist") + with open(path_to_sourcemod, "r") as sourcemod: + contents = sourcemod.read() + self.assertEqual(expected_sourcemod, contents) + + # ======================================================================== + # Begin actual tests + # ======================================================================== + + def test_basic(self): + self.createUserMod("foo") + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo")) + self.assertResults(expected_user_nl_cpl = "foo\n", + expected_shell_commands_result = "foo\n", + expected_sourcemod = "foo\n", + msg = "test_basic") + + def test_two_applications(self): + """If apply_user_mods is called twice, the second should appear after the first so that it takes precedence.""" + + self.createUserMod("foo1") + self.createUserMod("foo2") + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo1")) + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo2")) + self.assertResults(expected_user_nl_cpl = "foo1\nfoo2\n", + expected_shell_commands_result = "foo1\nfoo2\n", + expected_sourcemod = "foo2\n", + msg = "test_two_applications") + + def test_include(self): + """If there is an included mod, the main one should appear after the included one so that it takes precedence.""" + + self.createUserMod("base") + self.createUserMod("derived", include_dirs=["base"]) + + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "derived")) + + self.assertResults(expected_user_nl_cpl = "base\nderived\n", + expected_shell_commands_result = "base\nderived\n", 
+ expected_sourcemod = "derived\n", + msg = "test_include") + + def test_duplicate_includes(self): + """Test multiple includes, where both include the same base mod. + + The base mod should only be included once. + """ + + self.createUserMod("base") + self.createUserMod("derived1", include_dirs=["base"]) + self.createUserMod("derived2", include_dirs=["base"]) + self.createUserMod("derived_combo", + include_dirs = ["derived1", "derived2"]) + + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "derived_combo")) + + # NOTE(wjs, 2017-04-15) The ordering of derived1 vs. derived2 is not + # critical here: If this aspect of the behavior changes, the + # expected_contents can be changed to match the new behavior in this + # respect. + expected_contents = """base +derived2 +derived1 +derived_combo +""" + self.assertResults(expected_user_nl_cpl = expected_contents, + expected_shell_commands_result = expected_contents, + expected_sourcemod = "derived_combo\n", + msg = "test_duplicate_includes") From 0a38bb5ba9fb47c4bf5bffe0739f296986867f10 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Sun, 16 Apr 2017 14:35:30 -0600 Subject: [PATCH 045/219] Force user to always go through case.submit This was already the policy, but was not being enforced correctly. --- scripts/lib/CIME/case_run.py | 4 ++++ scripts/lib/CIME/case_test.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/scripts/lib/CIME/case_run.py b/scripts/lib/CIME/case_run.py index 0b4728b2f466..31ff36d00f91 100644 --- a/scripts/lib/CIME/case_run.py +++ b/scripts/lib/CIME/case_run.py @@ -245,6 +245,10 @@ def case_run(case): "Please submit your run using the submit script like so:" " ./case.submit") + # Forces user to use case.submit if they re-submit + if case.get_value("TESTCASE") is None: + case.set_value("RUN_WITH_SUBMIT", False) + prerun_script = case.get_value("PRERUN_SCRIPT") postrun_script = case.get_value("POSTRUN_SCRIPT") diff --git a/scripts/lib/CIME/case_test.py b/scripts/lib/CIME/case_test.py index 85aaed7149ab..2ea51caa1d0d 100644 --- a/scripts/lib/CIME/case_test.py +++ b/scripts/lib/CIME/case_test.py @@ -66,4 +66,6 @@ def case_test(case, testname=None): success = test.run() + case.set_value("RUN_WITH_SUBMIT", False) + return success From 8e8fd62a6b642e249d89cbf853827a850c045a85 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Sun, 16 Apr 2017 20:40:27 -0600 Subject: [PATCH 046/219] fix typo --- scripts/lib/CIME/case.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index ae878745eb92..f67a34ca0950 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -96,7 +96,7 @@ def __init__(self, case_root=None, read_only=True): self._component_classes = [] self._is_env_loaded = False # these are user_mods as defined in the compset - # Command Line user_mods are handeled seperately + # Command Line user_mods are handled seperately self._user_mods = None self.thread_count = None self.total_tasks = None From ec95467d75c45047130104e6ca52acdcdc8546ad Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Sun, 16 Apr 2017 21:20:55 -0600 Subject: [PATCH 047/219] Fix timing of applying compset user_mods The previous implementation had two problems: 1. If you specified a user_mods on the command-line along with a compset that has its own user_mods, then the compset's user_mods get applied twice. 2. The new place where there was a call to apply_user_mods happened too early: xmlchange commands can not be done at that point. 
This fixes these problems. I have tested this with the same changes described in 9cc7740ca19723a3a7b056f0e63e7abc28450132. I tested create_newcase with no user_mods, user_mods just from the command line, user_mods just from the compset, and user_mods from the command line and the compset. --- scripts/create_newcase | 2 +- scripts/lib/CIME/case.py | 13 +++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/scripts/create_newcase b/scripts/create_newcase index f1877cd76e5a..a28f8c8d1292 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -195,7 +195,7 @@ def _main_func(description): if user_mods_dir is not None: if os.path.isdir(user_mods_dir): user_mods_dir = os.path.abspath(user_mods_dir) - case.apply_user_mods(user_mods_dir) + case.apply_user_mods(user_mods_dir) # Lock env_case.xml lock_file("env_case.xml", caseroot) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index f67a34ca0950..ec77ebb4ee57 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -953,19 +953,20 @@ def create_caseroot(self, clone=False): self._create_caseroot_sourcemods() self._create_caseroot_tools() - # Apply user_mods if part of compset - if self._user_mods is not None: - self._user_mods = self.get_resolved_value(self._user_mods) - self.apply_user_mods() - def apply_user_mods(self, user_mods_dir=None): """ User mods can be specified on the create_newcase command line (usually when called from create test) or they can be in the compset definition, or both. """ + + if self._user_mods is None: + compset_user_mods_resolved = None + else: + compset_user_mods_resolved = self.get_resolved_value(self._user_mods) + # This looping order will lead to the specified user_mods_dir taking # precedence over self._user_mods, if there are any conflicts. 
- for user_mods in (self._user_mods, user_mods_dir): + for user_mods in (compset_user_mods_resolved, user_mods_dir): if user_mods is not None: if os.path.isabs(user_mods): user_mods_path = user_mods From 8075121fcf298cc762e42667a21a855b1a29c7fb Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 17 Apr 2017 09:43:01 -0600 Subject: [PATCH 048/219] add comment to README and stdout --- scripts/lib/CIME/case.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index ec77ebb4ee57..597876a4e8ae 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -949,6 +949,10 @@ def create_caseroot(self, clone=False): comp_grid = "%s_GRID"%component_class append_status("%s is %s"%(comp_grid,self.get_value(comp_grid)), "README.case", caseroot=self._caseroot) + if self._user_mods is not None: + note = "This compset includes user_mods %s"%self._user_mods + append_status(note, "README.case", caseroot=self._caseroot) + logger.info(note) if not clone: self._create_caseroot_sourcemods() self._create_caseroot_tools() From 29e0159f7fba14e29141c184d5f8224517ecfb58 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 13 Apr 2017 13:50:56 -0600 Subject: [PATCH 049/219] add support for user_mods in compset definition --- config/xml_schemas/config_compsets.xsd | 2 ++ scripts/lib/CIME/XML/compsets.py | 4 +++- scripts/lib/CIME/case.py | 30 +++++++++++++++++--------- 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/config/xml_schemas/config_compsets.xsd b/config/xml_schemas/config_compsets.xsd index e9072738a6d7..e2b64df2c666 100644 --- a/config/xml_schemas/config_compsets.xsd +++ b/config/xml_schemas/config_compsets.xsd @@ -11,6 +11,7 @@ + @@ -32,6 +33,7 @@ + diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index 875aea5d99a8..db1c4c62d783 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -36,8 +36,10 @@ def get_compset_match(self, name): for node in science_support_nodes: science_support.append(node.get("grid")) + user_mods = self.get_optional_node("user_mods", root=node) + logger.debug("Found node match with alias: %s and lname: %s" % (alias, lname)) - return (lname, alias, science_support) + return (lname, alias, science_support, user_mods) return (None, None, [False]) def get_compset_var_settings(self, compset, grid): diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index e6e8531a27ba..6add22645dce 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -95,7 +95,9 @@ def __init__(self, case_root=None, read_only=True): self._components = [] self._component_classes = [] self._is_env_loaded = False - + # these are user_mods as defined in the compset + # Command Line user_mods are handeled seperately + self._user_mods = None self.thread_count = None self.total_tasks = None self.tasks_per_node = None @@ -419,7 +421,7 @@ def _set_compset_and_pesfile(self, compset_name, files, user_compset=False, pesf # If the file exists, read it and see if there is a match for the compset alias or longname if (os.path.isfile(compsets_filename)): compsets = Compsets(compsets_filename) - match, compset_alias, science_support = compsets.get_compset_match(name=compset_name) + match, compset_alias, science_support, self._user_mods = compsets.get_compset_match(name=compset_name) pesfile = files.get_value("PES_SPEC_FILE" , {"component":component}) if match is not None: self._pesfile = pesfile @@ -963,14 +965,22 @@ def create_caseroot(self, clone=False): 
self._create_caseroot_tools() def apply_user_mods(self, user_mods_dir=None): - if user_mods_dir is not None: - if os.path.isabs(user_mods_dir): - user_mods_path = user_mods_dir - else: - user_mods_path = self.get_value('USER_MODS_DIR') - user_mods_path = os.path.join(user_mods_path, user_mods_dir) - self.set_value("USER_MODS_FULLPATH",user_mods_path) - apply_user_mods(self._caseroot, user_mods_path) + """ + User mods can be specified on the create_newcase command line (usually when called from create test) + or they can be in the compset definition, or both. + """ + + for user_mods in (user_mods_dir, self._user_mods): + if user_mods is not None: + if os.path.isabs(user_mods): + user_mods_path = user_mods + else: + user_mods_path = self.get_value('USER_MODS_DIR') + user_mods_path = os.path.join(user_mods_path, user_mods) + CIME.user_mod_support.apply_user_mods(self._caseroot, user_mods_path) + + + def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime_output_root=None): if cime_output_root is None: From 8c2bd5118c85290b87fe6a1a16708e1d23847be9 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 13 Apr 2017 14:18:17 -0600 Subject: [PATCH 050/219] fix pylint issue --- scripts/lib/CIME/case.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 6add22645dce..e1ff041ce914 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -977,7 +977,7 @@ def apply_user_mods(self, user_mods_dir=None): else: user_mods_path = self.get_value('USER_MODS_DIR') user_mods_path = os.path.join(user_mods_path, user_mods) - CIME.user_mod_support.apply_user_mods(self._caseroot, user_mods_path) + apply_user_mods(self._caseroot, user_mods_path) From a1656f4a9721db253e7af3197adafc04274385a5 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 13 Apr 2017 14:30:15 -0600 Subject: [PATCH 051/219] fix no-match args --- scripts/lib/CIME/XML/compsets.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index db1c4c62d783..09048b0e148a 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -27,7 +27,9 @@ def get_compset_match(self, name): nodes = self.get_nodes("compset") alias = None lname = None + science_support = [] + for node in nodes: alias = self.get_element_text("alias",root=node) lname = self.get_element_text("lname",root=node) @@ -40,7 +42,7 @@ def get_compset_match(self, name): logger.debug("Found node match with alias: %s and lname: %s" % (alias, lname)) return (lname, alias, science_support, user_mods) - return (None, None, [False]) + return (None, None, [False], None) def get_compset_var_settings(self, compset, grid): ''' From 582e308c8c789da1edbd20a61e0af8e51d42605e Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Fri, 14 Apr 2017 14:14:12 -0600 Subject: [PATCH 052/219] first changes needed for aquaplanet som --- config/cesm/config_grids.xml | 25 ++++++++++++++++--- config/xml_schemas/config_grids_v2.xsd | 2 +- .../docn/cime_config/config_component.xml | 7 ++++-- .../cime_config/namelist_definition_docn.xml | 4 +++ 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml index 269839e58c7a..8fab255610d3 100644 --- a/config/cesm/config_grids.xml +++ b/config/cesm/config_grids.xml @@ -361,13 +361,20 @@ gx1v7 - + 0.9x1.25 0.9x1.25 0.9x1.25 gx1v6 + + 0.9x1.25 + 0.9x1.25 + 0.9x1.25 + null + + 0.9x1.25 0.9x1.25 @@ 
-506,6 +513,13 @@ gx1v6 + + 1.9x2.5 + 1.9x2.5 + 1.9x2.5 + null + + 1.9x2.5 1.9x2.5 @@ -959,9 +973,11 @@ 288 192 domain.lnd.fv0.9x1.25_gx1v6.090309.nc - domain.ocn.0.9x1.25_gx1v6_090403.nc + domain.ocn.0.9x1.25_gx1v6_090403.nc domain.lnd.fv0.9x1.25_gx1v7.151020.nc - domain.ocn.fv0.9x1.25_gx1v7.151020.nc + domain.ocn.fv0.9x1.25_gx1v7.151020.nc + /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc + /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc 0.9x1.25 is FV 1-deg grid: @@ -969,6 +985,7 @@ 144 96 domain.lnd.fv1.9x2.5_gx1v6.090206.nc domain.ocn.1.9x2.5_gx1v6_090403.nc + domain.aqua.fv1.9x2.5.nc 1.9x2.5 is FV 2-deg grid: @@ -1483,7 +1500,7 @@ cpl/gridmaps/gx1v7/map_gx1v7_TO_ww3a_splice_170214.nc - + cpl/gridmaps/T31/map_T31_TO_ww3a_bilin_131104.nc diff --git a/config/xml_schemas/config_grids_v2.xsd b/config/xml_schemas/config_grids_v2.xsd index 2bac0a1de037..b4680b93f9e3 100644 --- a/config/xml_schemas/config_grids_v2.xsd +++ b/config/xml_schemas/config_grids_v2.xsd @@ -4,7 +4,7 @@ - + diff --git a/src/components/data_comps/docn/cime_config/config_component.xml b/src/components/data_comps/docn/cime_config/config_component.xml index 165d21c1dadb..686ab1bea336 100644 --- a/src/components/data_comps/docn/cime_config/config_component.xml +++ b/src/components/data_comps/docn/cime_config/config_component.xml @@ -15,7 +15,7 @@ char - prescribed,som,copyall,interannual,null + prescribed,pres_aquap,som,som_aquap,copyall,interannual,null prescribed null @@ -24,6 +24,8 @@ us20 interannual copyall + pres_aquap + som_aquap run_component_docn env_run.xml @@ -69,7 +71,8 @@ UNSET - pop_frc.1x1d.090130.nc + pop_frc.1x1d.090130.nc + /glade/u/home/benedict/ys/datain/cesm2_0_beta03.som.forcing/cam4.som.forcing.aquaplanet.QzaFix_h30Fix_TspunFix.fv19_CTL.nc run_component_docn env_run.xml diff --git a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml index 32d2d7f5ba5a..5d249cdbd805 100644 --- a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +++ b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml @@ -47,7 +47,9 @@ List of streams used for the given docn_mode. prescribed + prescribed som + som interannual copyall @@ -312,7 +314,9 @@ NULL SSTDATA + SSTDATA SOM + SOM IAF COPYALL From 28e84b71dd695bc8c2b0c6a330d14cc4d698b24f Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Fri, 14 Apr 2017 20:34:34 -0600 Subject: [PATCH 053/219] Fix precedence of user_mods application Now --user-mods on the command line (including testmods) will take precedence over the user_mods set by the compset - for user_nl files, shell_commands and SourceMods. 
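In other words, directories are applied in a fixed order -- compset mods first, command-line mods last -- and each kind of file is written so that the later application wins: user_nl and shell-command contents are appended, while SourceMods files are overwritten (see the user_mod_support.py diff below). A minimal illustrative sketch of that idea only; apply_in_order and the hard-coded user_nl_cpl / mysrc.F90 paths here are placeholders, not the actual CIME.user_mod_support code:

    import os
    import shutil

    def apply_in_order(mods_dirs, caseroot):
        # Illustrative sketch only -- not the real CIME implementation.
        # Earlier entries are applied first, so later entries win conflicts.
        for mods_dir in mods_dirs:
            # user_nl files: append, so a variable set by a later directory
            # overrides the same variable set by an earlier one.
            src_nl = os.path.join(mods_dir, "user_nl_cpl")
            if os.path.isfile(src_nl):
                with open(src_nl) as src:
                    contents = src.read()
                with open(os.path.join(caseroot, "user_nl_cpl"), "a") as dst:
                    dst.write(contents)
            # SourceMods: overwrite, so the last directory's copy is kept.
            src_mod = os.path.join(mods_dir, "SourceMods", "src.drv", "mysrc.F90")
            if os.path.isfile(src_mod):
                shutil.copyfile(src_mod,
                                os.path.join(caseroot, "SourceMods", "src.drv", "mysrc.F90"))

Calling something like apply_in_order([compset_mods, cmdline_mods], caseroot) therefore gives the command-line directory the last word.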
I have tested this with this diff to the A compset diff --git a/src/drivers/mct/cime_config/config_compsets.xml b/src/drivers/mct/cime_config/config_compsets.xml index c11354e..7e6c2c9 100644 --- a/src/drivers/mct/cime_config/config_compsets.xml +++ b/src/drivers/mct/cime_config/config_compsets.xml @@ -40,6 +40,7 @@ A 2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV + /Users/sacks/temporary/user_mods_compset Along with this create_newcase command: ./create_newcase -case test_0414m -compset A -res f45_g37 \ --run-unsupported \ --user-mods-dir /Users/sacks/temporary/user_mods_command_line where the contents of the two relevant user_mods directories are: --- user_mods_compset/shell_commands --- ./xmlchange STOP_N=101 --- user_mods_compset/SourceMods/src.drv/mysrc.F90 --- user_mods_compset --- user_mods_compset/user_nl_cpl --- user_mods_compset --- user_mods_command_line/shell_commands --- ./xmlchange STOP_N=102 --- user_mods_command_line/SourceMods/src.drv/mysrc.F90 --- user_mods_command_line --- user_mods_command_line/user_nl_cpl --- user_mods_command_line The final contents are: --- user_nl_cpl --- user_mods_compset user_mods_command_line --- shell_commands --- ./xmlchange --force STOP_N=102 --- SourceMods/src.drv/mysrc.F90 --- user_mods_command_line And $ ./xmlquery STOP_N STOP_N: 102 thus demonstrating that the user_mods on the command-line takes precedence over the compset's user_mods. --- scripts/lib/CIME/XML/compsets.py | 6 +++- scripts/lib/CIME/case.py | 6 ++-- scripts/lib/CIME/user_mod_support.py | 47 +++++++++++++++------------- 3 files changed, 34 insertions(+), 25 deletions(-) diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index 09048b0e148a..f5b85d8a5d2a 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -38,7 +38,11 @@ def get_compset_match(self, name): for node in science_support_nodes: science_support.append(node.get("grid")) - user_mods = self.get_optional_node("user_mods", root=node) + user_mods_node = self.get_optional_node("user_mods", root=node) + if user_mods_node is not None: + user_mods = user_mods_node.text + else: + user_mods = None logger.debug("Found node match with alias: %s and lname: %s" % (alias, lname)) return (lname, alias, science_support, user_mods) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index e1ff041ce914..d7e051021c06 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -969,8 +969,10 @@ def apply_user_mods(self, user_mods_dir=None): User mods can be specified on the create_newcase command line (usually when called from create test) or they can be in the compset definition, or both. """ - - for user_mods in (user_mods_dir, self._user_mods): + + # This looping order will lead to the specified user_mods_dir taking + # precedence over self._user_mods, if there are any conflicts. 
+ for user_mods in (self._user_mods, user_mods_dir): if user_mods is not None: if os.path.isabs(user_mods): user_mods_path = user_mods diff --git a/scripts/lib/CIME/user_mod_support.py b/scripts/lib/CIME/user_mod_support.py index 572910e43b22..d27c7255542d 100644 --- a/scripts/lib/CIME/user_mod_support.py +++ b/scripts/lib/CIME/user_mod_support.py @@ -14,6 +14,9 @@ def apply_user_mods(caseroot, user_mods_path): updating SourceMods and creating case shell_commands and xmlchange_cmds files First remove case shell_commands files if any already exist + + If this function is called multiple times, settings from later calls will + take precedence over earlier calls, if there are conflicts. ''' case_shell_command_files = [os.path.join(caseroot,"shell_commands"), os.path.join(caseroot,"xmlchange_cmnds")] @@ -22,6 +25,13 @@ def apply_user_mods(caseroot, user_mods_path): os.remove(shell_command_file) include_dirs = build_include_dirs_list(user_mods_path) + # If a user_mods dir 'foo' includes 'bar', the include_dirs list returned + # from build_include_dirs has 'foo' before 'bar'. But with the below code, + # directories that occur later in the list take precedence over the earlier + # ones, and we want 'foo' to take precedence over 'bar' in this case (in + # general: we want a given user_mods directory to take precedence over any + # mods that it includes). So we reverse include_dirs to accomplish this. + include_dirs.reverse() logger.debug("include_dirs are %s"%include_dirs) for include_dir in include_dirs: # write user_nl_xxx file in caseroot @@ -31,7 +41,11 @@ def apply_user_mods(caseroot, user_mods_path): if len(newcontents) == 0: continue case_user_nl = user_nl.replace(include_dir, caseroot) - update_user_nl_file(case_user_nl, newcontents) + # If the same variable is set twice in a user_nl file, the later one + # takes precedence. So by appending the new contents, later entries + # in the include_dirs list take precedence over earlier entries. + with open(case_user_nl, "a") as fd: + fd.write(newcontents) # update SourceMods in caseroot for root, _, files in os.walk(include_dir,followlinks=True,topdown=False): @@ -39,21 +53,18 @@ def apply_user_mods(caseroot, user_mods_path): for sfile in files: source_mods = os.path.join(root,sfile) case_source_mods = source_mods.replace(include_dir, caseroot) + # We overwrite any existing SourceMods file so that later + # include_dirs take precedence over earlier ones if os.path.isfile(case_source_mods): - logger.warn("Refusing to overwrite existing SourceMods in %s"%case_source_mods) + logger.warn("WARNING: Overwriting existing SourceMods in %s"%case_source_mods) else: logger.info("Adding SourceMod to case %s"%case_source_mods) - try: - shutil.copyfile(source_mods, case_source_mods) - except: - expect(False, "Could not write file %s in caseroot %s" - %(case_source_mods,caseroot)) + try: + shutil.copyfile(source_mods, case_source_mods) + except: + expect(False, "Could not write file %s in caseroot %s" + %(case_source_mods,caseroot)) - # Reverse include_dirs to make sure xmlchange commands are called in the - # correct order; it may be desireable to reverse include_dirs above the - # previous loop and then append user_nl changes rather than prepend them. 
- include_dirs.reverse() - for include_dir in include_dirs: # create xmlchange_cmnds and shell_commands in caseroot shell_command_files = glob.glob(os.path.join(include_dir,"shell_commands")) +\ glob.glob(os.path.join(include_dir,"xmlchange_cmnds")) @@ -70,6 +81,8 @@ def apply_user_mods(caseroot, user_mods_path): shell_commands_file) with open(shell_commands_file,"r") as fd: new_shell_commands = fd.read().replace("xmlchange","xmlchange --force") + # By appending the new commands to the end, settings from later + # include_dirs take precedence over earlier ones with open(case_shell_commands, "a") as fd: fd.write(new_shell_commands) @@ -78,16 +91,6 @@ def apply_user_mods(caseroot, user_mods_path): os.chmod(shell_command_file, 0777) run_cmd_no_fail(shell_command_file) -def update_user_nl_file(case_user_nl, contents): - if os.path.isfile(case_user_nl): - with open(case_user_nl, "r") as fd: - old_contents = fd.read() - contents = contents + old_contents - logger.debug("Pre-pending file %s"%(case_user_nl)) - with open(case_user_nl, "w") as fd: - fd.write(contents) - - def build_include_dirs_list(user_mods_path, include_dirs=None): ''' From 4df9f7eb0a1d6ff151c6bbc79793d6ef0eb546ab Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sat, 15 Apr 2017 11:04:56 -0600 Subject: [PATCH 054/219] fixed issue with using new user_mods element in compset definition --- scripts/lib/CIME/XML/compsets.py | 2 -- scripts/lib/CIME/case.py | 9 +++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index f5b85d8a5d2a..2d96aaed3e68 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -37,13 +37,11 @@ def get_compset_match(self, name): science_support_nodes = self.get_nodes("science_support", root=node) for node in science_support_nodes: science_support.append(node.get("grid")) - user_mods_node = self.get_optional_node("user_mods", root=node) if user_mods_node is not None: user_mods = user_mods_node.text else: user_mods = None - logger.debug("Found node match with alias: %s and lname: %s" % (alias, lname)) return (lname, alias, science_support, user_mods) return (None, None, [False], None) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index d7e051021c06..38ef14a3e46d 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -964,12 +964,16 @@ def create_caseroot(self, clone=False): self._create_caseroot_sourcemods() self._create_caseroot_tools() + # Apply user_mods if part of compset + if self._user_mods is not None: + self._user_mods = self.get_resolved_value(self._user_mods) + self.apply_user_mods() + def apply_user_mods(self, user_mods_dir=None): """ User mods can be specified on the create_newcase command line (usually when called from create test) or they can be in the compset definition, or both. """ - # This looping order will lead to the specified user_mods_dir taking # precedence over self._user_mods, if there are any conflicts. 
for user_mods in (self._user_mods, user_mods_dir): @@ -981,9 +985,6 @@ def apply_user_mods(self, user_mods_dir=None): user_mods_path = os.path.join(user_mods_path, user_mods) apply_user_mods(self._caseroot, user_mods_path) - - - def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime_output_root=None): if cime_output_root is None: cime_output_root = self.get_value("CIME_OUTPUT_ROOT") From 4f6bb99fef2410c246e1c93b621f4ff98550d890 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Sun, 16 Apr 2017 06:12:03 -0600 Subject: [PATCH 055/219] Add unit tests for user_mod_support --- .../lib/CIME/tests/test_user_mod_support.py | 171 ++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100644 scripts/lib/CIME/tests/test_user_mod_support.py diff --git a/scripts/lib/CIME/tests/test_user_mod_support.py b/scripts/lib/CIME/tests/test_user_mod_support.py new file mode 100644 index 000000000000..c8e582f01b17 --- /dev/null +++ b/scripts/lib/CIME/tests/test_user_mod_support.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +import unittest +import shutil +import tempfile +import os +from CIME.user_mod_support import apply_user_mods + +# ======================================================================== +# Define some parameters +# ======================================================================== + +_SOURCEMODS = os.path.join("SourceMods", "src.drv") + +class TestUserModSupport(unittest.TestCase): + + # ======================================================================== + # Test helper functions + # ======================================================================== + + def setUp(self): + self._caseroot = tempfile.mkdtemp() + self._caseroot_sourcemods = os.path.join(self._caseroot, _SOURCEMODS) + os.makedirs(self._caseroot_sourcemods) + self._user_mods_parent_dir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self._caseroot, ignore_errors=True) + shutil.rmtree(self._user_mods_parent_dir, ignore_errors=True) + + def createUserMod(self, name, include_dirs=None): + """Create a user_mods directory with the given name. + + This directory is created within self._user_mods_parent_dir + + For name='foo', it will contain: + + - A user_nl_cpl file with contents: + foo + + - A shell_commands file with contents: + echo foo >> /PATH/TO/CASEROOT/shell_commands_result + + - A file in _SOURCEMODS named myfile.F90 with contents: + foo + + If include_dirs is given, it should be a list of strings, giving names + of other user_mods directories to include. e.g., if include_dirs is + ['foo1', 'foo2'], then this will create a file 'include_user_mods' that + contains paths to the 'foo1' and 'foo2' user_mods directories, one per + line. 
+ """ + + mod_dir = os.path.join(self._user_mods_parent_dir, name) + os.makedirs(mod_dir) + mod_dir_sourcemods = os.path.join(mod_dir, _SOURCEMODS) + os.makedirs(mod_dir_sourcemods) + + with open(os.path.join(mod_dir, "user_nl_cpl"), "w") as user_nl_cpl: + user_nl_cpl.write(name + "\n") + with open(os.path.join(mod_dir, "shell_commands"), "w") as shell_commands: + command = "echo %s >> %s/shell_commands_result\n"%(name, + self._caseroot) + shell_commands.write(command) + with open(os.path.join(mod_dir_sourcemods, "myfile.F90"), "w") as f90_file: + f90_file.write(name + "\n") + + if include_dirs: + with open(os.path.join(mod_dir, "include_user_mods"), "w") as include_user_mods: + for one_include in include_dirs: + include_user_mods.write(os.path.join(self._user_mods_parent_dir, one_include) + "\n") + + def assertResults(self, expected_user_nl_cpl, + expected_shell_commands_result, + expected_sourcemod, + msg = ""): + """Asserts that the contents of the files in self._caseroot match expectations + + If msg is provided, it is printed for some failing assertions + """ + + path_to_user_nl_cpl = os.path.join(self._caseroot, "user_nl_cpl") + self.assertTrue(os.path.isfile(path_to_user_nl_cpl), + msg = msg + ": user_nl_cpl does not exist") + with open(path_to_user_nl_cpl, "r") as user_nl_cpl: + contents = user_nl_cpl.read() + self.assertEqual(expected_user_nl_cpl, contents) + + path_to_shell_commands_result = os.path.join(self._caseroot, "shell_commands_result") + self.assertTrue(os.path.isfile(path_to_shell_commands_result), + msg = msg + ": shell_commands_result does not exist") + with open(path_to_shell_commands_result, "r") as shell_commands_result: + contents = shell_commands_result.read() + self.assertEqual(expected_shell_commands_result, contents) + + path_to_sourcemod = os.path.join(self._caseroot_sourcemods, "myfile.F90") + self.assertTrue(os.path.isfile(path_to_sourcemod), + msg = msg + ": sourcemod file does not exist") + with open(path_to_sourcemod, "r") as sourcemod: + contents = sourcemod.read() + self.assertEqual(expected_sourcemod, contents) + + # ======================================================================== + # Begin actual tests + # ======================================================================== + + def test_basic(self): + self.createUserMod("foo") + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo")) + self.assertResults(expected_user_nl_cpl = "foo\n", + expected_shell_commands_result = "foo\n", + expected_sourcemod = "foo\n", + msg = "test_basic") + + def test_two_applications(self): + """If apply_user_mods is called twice, the second should appear after the first so that it takes precedence.""" + + self.createUserMod("foo1") + self.createUserMod("foo2") + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo1")) + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo2")) + self.assertResults(expected_user_nl_cpl = "foo1\nfoo2\n", + expected_shell_commands_result = "foo1\nfoo2\n", + expected_sourcemod = "foo2\n", + msg = "test_two_applications") + + def test_include(self): + """If there is an included mod, the main one should appear after the included one so that it takes precedence.""" + + self.createUserMod("base") + self.createUserMod("derived", include_dirs=["base"]) + + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "derived")) + + self.assertResults(expected_user_nl_cpl = "base\nderived\n", + expected_shell_commands_result = "base\nderived\n", 
+ expected_sourcemod = "derived\n", + msg = "test_include") + + def test_duplicate_includes(self): + """Test multiple includes, where both include the same base mod. + + The base mod should only be included once. + """ + + self.createUserMod("base") + self.createUserMod("derived1", include_dirs=["base"]) + self.createUserMod("derived2", include_dirs=["base"]) + self.createUserMod("derived_combo", + include_dirs = ["derived1", "derived2"]) + + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "derived_combo")) + + # NOTE(wjs, 2017-04-15) The ordering of derived1 vs. derived2 is not + # critical here: If this aspect of the behavior changes, the + # expected_contents can be changed to match the new behavior in this + # respect. + expected_contents = """base +derived2 +derived1 +derived_combo +""" + self.assertResults(expected_user_nl_cpl = expected_contents, + expected_shell_commands_result = expected_contents, + expected_sourcemod = "derived_combo\n", + msg = "test_duplicate_includes") From fc4ac0b488c178b85097a779fcf5c8c8c7f473ba Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Sun, 16 Apr 2017 20:40:27 -0600 Subject: [PATCH 056/219] fix typo --- scripts/lib/CIME/case.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 38ef14a3e46d..8532bf928c89 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -96,7 +96,7 @@ def __init__(self, case_root=None, read_only=True): self._component_classes = [] self._is_env_loaded = False # these are user_mods as defined in the compset - # Command Line user_mods are handeled seperately + # Command Line user_mods are handled seperately self._user_mods = None self.thread_count = None self.total_tasks = None From 50e5d7a609ae4abf5fd1589d1ef147c10d1b7f2a Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Sun, 16 Apr 2017 21:20:55 -0600 Subject: [PATCH 057/219] Fix timing of applying compset user_mods The previous implementation had two problems: 1. If you specified a user_mods on the command-line along with a compset that has its own user_mods, then the compset's user_mods get applied twice. 2. The new place where there was a call to apply_user_mods happened too early: xmlchange commands can not be done at that point. This fixes these problems. I have tested this with the same changes described in 9cc7740ca19723a3a7b056f0e63e7abc28450132. I tested create_newcase with no user_mods, user_mods just from the command line, user_mods just from the compset, and user_mods from the command line and the compset. 
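In outline, the fix keeps create_newcase as the single place that applies user mods and has apply_user_mods itself resolve the compset's user_mods value lazily, at a point where xmlchange commands already work. A rough sketch of the method's final shape -- simplified, with apply_one_directory standing in as a placeholder for the real CIME.user_mod_support.apply_user_mods call; the actual code is in the diff below:

    def apply_user_mods(self, user_mods_dir=None):
        # Sketch only -- see the diff below for the real implementation.
        compset_mods = None
        if self._user_mods is not None:
            # Resolved here, at apply time, rather than in create_caseroot,
            # so xmlchange-based settings can be handled.
            compset_mods = self.get_resolved_value(self._user_mods)
        # Compset mods first, command-line mods second, so the latter take
        # precedence, and each directory is applied exactly once.
        for mods in (compset_mods, user_mods_dir):
            if mods is not None:
                apply_one_directory(self._caseroot, mods)  # placeholder helper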
--- scripts/create_newcase | 2 +- scripts/lib/CIME/case.py | 13 +++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/scripts/create_newcase b/scripts/create_newcase index f1877cd76e5a..a28f8c8d1292 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -195,7 +195,7 @@ def _main_func(description): if user_mods_dir is not None: if os.path.isdir(user_mods_dir): user_mods_dir = os.path.abspath(user_mods_dir) - case.apply_user_mods(user_mods_dir) + case.apply_user_mods(user_mods_dir) # Lock env_case.xml lock_file("env_case.xml", caseroot) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 8532bf928c89..71432f0b840e 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -964,19 +964,20 @@ def create_caseroot(self, clone=False): self._create_caseroot_sourcemods() self._create_caseroot_tools() - # Apply user_mods if part of compset - if self._user_mods is not None: - self._user_mods = self.get_resolved_value(self._user_mods) - self.apply_user_mods() - def apply_user_mods(self, user_mods_dir=None): """ User mods can be specified on the create_newcase command line (usually when called from create test) or they can be in the compset definition, or both. """ + + if self._user_mods is None: + compset_user_mods_resolved = None + else: + compset_user_mods_resolved = self.get_resolved_value(self._user_mods) + # This looping order will lead to the specified user_mods_dir taking # precedence over self._user_mods, if there are any conflicts. - for user_mods in (self._user_mods, user_mods_dir): + for user_mods in (compset_user_mods_resolved, user_mods_dir): if user_mods is not None: if os.path.isabs(user_mods): user_mods_path = user_mods From 30755760ebcff3c002ad53251cc1c6f4e525ac6a Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 17 Apr 2017 09:43:01 -0600 Subject: [PATCH 058/219] add comment to README and stdout --- scripts/lib/CIME/case.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 71432f0b840e..6015b9aa6edf 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -960,6 +960,10 @@ def create_caseroot(self, clone=False): comp_grid = "%s_GRID"%component_class append_status("%s is %s"%(comp_grid,self.get_value(comp_grid)), "README.case", caseroot=self._caseroot) + if self._user_mods is not None: + note = "This compset includes user_mods %s"%self._user_mods + append_status(note, "README.case", caseroot=self._caseroot) + logger.info(note) if not clone: self._create_caseroot_sourcemods() self._create_caseroot_tools() From 0266e9d994a44cc9f6a4b91659c71c8dca5f16b1 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Mon, 17 Apr 2017 10:05:16 -0600 Subject: [PATCH 059/219] Print user_mods directory upfront Point is: I want to make it hard to miss --- scripts/lib/CIME/case.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 597876a4e8ae..351e6f4a43a5 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -437,7 +437,10 @@ def _set_compset_and_pesfile(self, compset_name, files, user_compset=False, pesf self.set_lookup_value("USER_MODS_DIR" , user_mods_dir) self.set_lookup_value("PES_SPEC_FILE" , files.get_value("PES_SPEC_FILE" , {"component":component}, resolved=False)) - logger.info("Compset longname is %s " %(match)) + compset_info = "Compset longname is %s"%(match) + if self._user_mods is not None: + compset_info += " with user_mods directory 
%s"%(self._user_mods) + logger.info(compset_info) logger.info("Compset specification file is %s" %(compsets_filename)) logger.info("Pes specification file is %s" %(pesfile)) return compset_alias, science_support @@ -935,7 +938,10 @@ def create_caseroot(self, clone=False): # Open a new README.case file in $self._caseroot append_status(" ".join(sys.argv), "README.case", caseroot=self._caseroot) - append_status("Compset longname is %s"%self.get_value("COMPSET"), + compset_info = "Compset longname is %s"%(self.get_value("COMPSET")) + if self._user_mods is not None: + compset_info += " with user_mods directory %s"%(self._user_mods) + append_status(compset_info, "README.case", caseroot=self._caseroot) append_status("Compset specification file is %s" % (self.get_value("COMPSETS_SPEC_FILE")), @@ -949,10 +955,6 @@ def create_caseroot(self, clone=False): comp_grid = "%s_GRID"%component_class append_status("%s is %s"%(comp_grid,self.get_value(comp_grid)), "README.case", caseroot=self._caseroot) - if self._user_mods is not None: - note = "This compset includes user_mods %s"%self._user_mods - append_status(note, "README.case", caseroot=self._caseroot) - logger.info(note) if not clone: self._create_caseroot_sourcemods() self._create_caseroot_tools() From 09190ebd7ee1ae802dff70bb5abde7f58d2bb899 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 17 Apr 2017 09:53:57 -0700 Subject: [PATCH 060/219] fix typo in cori-knl binding --- config/cesm/machines/config_machines.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index 4b0044680cec..f07a9ee7942a 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -491,7 +491,7 @@ --label -n $TOTALPES - -c 4 --cpu-bind=cores + -c 4 --cpu_bind=cores From a97da52df492d3db97346744fc457b1398aa8035 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 17 Apr 2017 13:13:56 -0600 Subject: [PATCH 061/219] Fix NODEFAIL test on cheyenne. When the switch is made from running a real exe to a script, an env var must be set in order for MPI to be able to run the script on cheyenne. --- scripts/lib/CIME/SystemTests/nodefail.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/lib/CIME/SystemTests/nodefail.py b/scripts/lib/CIME/SystemTests/nodefail.py index 2f0436a870e6..9101a5579970 100644 --- a/scripts/lib/CIME/SystemTests/nodefail.py +++ b/scripts/lib/CIME/SystemTests/nodefail.py @@ -56,8 +56,16 @@ def _restart_fake_phase(self): env_mach_specific.set_value("run_exe", fake_exe_file) self._case.flush(flushall=True) + # This flag is needed by mpt to run a script under mpiexec + mpilib = self._case.get_value("MPILIB") + if mpilib == "mpt": + os.environ["MPI_SHEPHERD"] = "true" + self.run_indv(suffix=None) + if mpilib == "mpt": + del os.environ["MPI_SHEPHERD"] + env_mach_specific = self._case.get_env("mach_specific") env_mach_specific.set_value("run_exe", prev_run_exe) self._case.flush(flushall=True) From 1f72a2ef087a3c8b3857669af78319f01217e471 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Mon, 17 Apr 2017 15:09:43 -0600 Subject: [PATCH 062/219] Put explicit dot in paths in some instructions Some instructions have you source .env_mach_specific.sh. If you have some other .env_mach_specific.sh in your path (e.g., from pointing to a cprnc directory), this one can get picked up in place of the one in the current directory. Add an explicit ./ before the file to fix this problem. 
--- tools/cprnc/README | 4 ++-- tools/mapping/gen_domain_files/INSTALL | 4 ++-- tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL | 4 ++-- tools/mapping/map_field/INSTALL | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/cprnc/README b/tools/cprnc/README index 1ac0fd2e0902..eceb705aafc5 100644 --- a/tools/cprnc/README +++ b/tools/cprnc/README @@ -19,14 +19,14 @@ configure. Next, run make to build cprnc. For instance, using sh/bash as a login shell: -CIMEROOT=../.. source .env_mach_specific.sh && make +CIMEROOT=../.. source ./.env_mach_specific.sh && make Finally, put the resulting executable in CCSM_CPRNC as defined in config_machines.xml. You can also build cprnc using cmake: export CIMEROOT=../.. -MPILIB=mpi-serial source .env_mach_specific.sh +MPILIB=mpi-serial source ./.env_mach_specific.sh ../configure --macros-format=CMake Next run cmake . to build the Makefile and then diff --git a/tools/mapping/gen_domain_files/INSTALL b/tools/mapping/gen_domain_files/INSTALL index a7b97822f2ee..3b253d264d7b 100644 --- a/tools/mapping/gen_domain_files/INSTALL +++ b/tools/mapping/gen_domain_files/INSTALL @@ -7,9 +7,9 @@ Prior to building, you must make sure environment variables CIMEROOT and CIME_MO (1) $ cd src (2) $ $CIMEROOT/tools/configure --machine [machine name] --macros-format Makefile (3) Bash users: - $ . .env_mach_specific.sh + $ . ./.env_mach_specific.sh csh users: - $ source .env_mach_specific.csh + $ source ./.env_mach_specific.csh (4) $ gmake Note: in the second step, replace [machine name] with the machine you are diff --git a/tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL b/tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL index d57d7b49545b..c4af56772286 100644 --- a/tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL +++ b/tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL @@ -5,9 +5,9 @@ HOW TO BUILD (1) $ cd src (2) $ ../../../../configure --macros-format Makefile --mpilib mpi-serial Bash users: -(3) $ (. .env_mach_specific.sh ; gmake) +(3) $ (. ./.env_mach_specific.sh ; gmake) csh users: -(3) $ (source .env_mach_specific.csh ; gmake) +(3) $ (source ./.env_mach_specific.csh ; gmake) Note: in the second step, you may need to include "--machine [machine name]", where [machine name] is the name of the machine you are building on. In most diff --git a/tools/mapping/map_field/INSTALL b/tools/mapping/map_field/INSTALL index 766c76e3fec0..bf9508417f2a 100644 --- a/tools/mapping/map_field/INSTALL +++ b/tools/mapping/map_field/INSTALL @@ -7,9 +7,9 @@ Prior to building, you must make sure $CIMEROOT is set. (1) $ cd src (2) $ $CIMEROOT/tools/configure macros-format Makefile (3) Bash users: - $ . .env_mach_specific.sh + $ . 
./.env_mach_specific.sh csh users: - $ source .env_mach_specific.csh + $ source ./.env_mach_specific.csh (4) $ gmake Note: in the second step, replace [machine name] with the machine you are From fbcb8e8f71d44549251d0c0c0480c7532d5f4132 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 17 Apr 2017 15:49:33 -0600 Subject: [PATCH 063/219] fix indentation error --- scripts/lib/CIME/case_setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index a75843cecd66..087c5999b381 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -237,9 +237,9 @@ def adjust_pio_layout(case, new_pio_stride): if pio_stride != new_stride: logger.info("Resetting PIO_STRIDE_%s to %s"%(comp, new_stride)) case.set_value("PIO_STRIDE_%s"%comp, new_stride) - if pio_numtasks != new_numtasks: - logger.info("Resetting PIO_NUMTASKS_%s to %s"%(comp, new_numtasks)) - case.set_value("PIO_NUMTASKS_%s"%comp, new_numtasks) + if pio_numtasks != new_numtasks: + logger.info("Resetting PIO_NUMTASKS_%s to %s"%(comp, new_numtasks)) + case.set_value("PIO_NUMTASKS_%s"%comp, new_numtasks) ############################################################################### From 49bf0686b6dcbf2bafd3cdb57ca08e8f95392e54 Mon Sep 17 00:00:00 2001 From: Alice Bertini Date: Tue, 18 Apr 2017 09:46:27 -0600 Subject: [PATCH 064/219] bug fix for dart --- scripts/lib/CIME/case_st_archive.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/scripts/lib/CIME/case_st_archive.py b/scripts/lib/CIME/case_st_archive.py index 75158ee7212b..cfee31dacf96 100644 --- a/scripts/lib/CIME/case_st_archive.py +++ b/scripts/lib/CIME/case_st_archive.py @@ -43,11 +43,11 @@ def _get_ninst_info(case, compclass): ninst_strings = [] if ninst is None: ninst = 1 - for i in range(ninst): - if ninst > 1: - ninst_strings.append('_' + '%04d' % i) - else: - ninst_strings.append('') + for i in range(1,ninst+1): + if ninst > 1: + ninst_strings.append('_' + '%04d' % i) + else: + ninst_strings.append('') logger.debug("ninst and ninst_strings are: %s and %s for %s" %(ninst, ninst_strings, compclass)) return ninst, ninst_strings @@ -120,7 +120,7 @@ def _archive_log_files(case): srcfile = join(rundir, os.path.basename(logfile)) destfile = join(archive_logdir, os.path.basename(logfile)) shutil.move(srcfile, destfile) - logger.info("moving \b%s to \b%s" %(srcfile, destfile)) + logger.info("moving \n%s to \n%s" %(srcfile, destfile)) ############################################################################### @@ -231,7 +231,7 @@ def _archive_restarts(case, archive, archive_entry, for i in range(ninst): restfiles = "" pattern = r"%s\.%s\d*.*" % (casename, compname) - if pattern != "dart": + if "dart" not in pattern: pfile = re.compile(pattern) files = [f for f in os.listdir(rundir) if pfile.search(f)] if ninst_strings: From f4a2804c79231bf66d07584fb4887c4f45710689 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Tue, 18 Apr 2017 11:17:54 -0600 Subject: [PATCH 065/219] fixes for test to work on cheyenne --- config/cesm/machines/config_machines.xml | 4 +++- config/config_tests.xml | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index f07a9ee7942a..d0b61ed4bcc8 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -219,8 +219,10 @@ mpiexec_mpt + -np $TOTALPES -p "%g:" - omplace + + omplace diff 
--git a/config/config_tests.xml b/config/config_tests.xml index 7532b2509030..ab0dadea2cef 100644 --- a/config/config_tests.xml +++ b/config/config_tests.xml @@ -366,7 +366,8 @@ LII CLM initial condition interpolation test For testing infra only. Tests restart upon detected node failure 1 - ndays + nsteps + $ATM_NCPL 11 $STOP_N / 2 + 1 $STOP_OPTION From bec05af88934f6e5862daff11ad4d534121f7d1c Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 18 Apr 2017 11:21:46 -0600 Subject: [PATCH 066/219] Improve create_test output by dumping case errors directly to screen. BEFORE: Finished MODEL_BUILD for test TESTBUILDFAIL_P1.f19_g16_rx1.A.melvin_gnu in 1.511061 seconds (FAIL). [COMPLETED 2 of 8] Case dir: /home/jgfouca/acme/scratch/TESTBUILDFAIL_P1.f19_g16_rx1.A.melvin_gnu.20170418_112029_6ygq0x AFTER Finished MODEL_BUILD for test TESTBUILDFAIL_P1.f19_g16_rx1.A.melvin_gnu in 1.511061 seconds (FAIL). [COMPLETED 2 of 8] Case dir: /home/jgfouca/acme/scratch/TESTBUILDFAIL_P1.f19_g16_rx1.A.melvin_gnu.20170418_112029_6ygq0x Errors were: Building test for TESTBUILDFAIL in directory /home/jgfouca/acme/scratch/TESTBUILDFAIL_P1.f19_g16_rx1.A.melvin_gnu.20170418_112029_6ygq0x Exception during build: ERROR: Intentional fail for testing infrastructure Traceback (most recent call last): File "/home/jgfouca/cime_yetanother/scripts/Tools/../../scripts/lib/CIME/SystemTests/system_tests_common.py", line 87, in build model_only=(phase_name==MODEL_BUILD_PHASE)) File "/home/jgfouca/cime_yetanother/scripts/Tools/../../scripts/lib/CIME/SystemTests/system_tests_common.py", line 523, in build_phase expect(False, "Intentional fail for testing infrastructure") File "/home/jgfouca/cime_yetanother/scripts/Tools/../../scripts/lib/CIME/utils.py", line 29, in expect raise exc_type("%s %s"%(error_prefix,error_msg)) SystemExit: ERROR: Intentional fail for testing infrastructure --- scripts/lib/CIME/test_scheduler.py | 39 ++++++++++++++++-------------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index 2b5305cfcd28..dd97d93d301b 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -328,28 +328,26 @@ def _update_test_status(self, test, phase, status): def _shell_cmd_for_phase(self, test, cmd, phase, from_dir=None): ########################################################################### while True: - rc, output, _ = run_cmd(cmd, combine_output=True, from_dir=from_dir) + rc, output, errput = run_cmd(cmd, from_dir=from_dir) if rc != 0: self._log_output(test, "%s FAILED for test '%s'.\nCommand: %s\nOutput: %s\n" % - (phase, test, cmd, output)) + (phase, test, cmd, output + "\n" + errput)) # Temporary hack to get around odd file descriptor use by # buildnml scripts. if "bad interpreter" in output: time.sleep(1) continue else: - break + return False, errput else: # We don't want "RUN PASSED" in the TestStatus.log if the only thing that # succeeded was the submission. 
phase = "SUBMIT" if phase == RUN_PHASE else phase self._log_output(test, "%s PASSED for test '%s'.\nCommand: %s\nOutput: %s\n" % - (phase, test, cmd, output)) - break - - return rc == 0 + (phase, test, cmd, output + "\n" + errput)) + return True, errput ########################################################################### def _create_newcase_phase(self, test): @@ -379,9 +377,12 @@ def _create_newcase_phase(self, test): testmods_dir = files.get_value("TESTS_MODS_DIR", {"component": component}) test_mod_file = os.path.join(testmods_dir, component, modspath) if not os.path.exists(test_mod_file): - self._log_output(test, "Missing testmod file '%s'" % test_mod_file) - return False + error = "Missing testmod file '%s'" % test_mod_file + self._log_output(test, error) + return False, error + create_newcase_cmd += " --user-mods-dir %s" % test_mod_file + mpilib = None if case_opts is not None: for case_opt in case_opts: # pylint: disable=not-an-iterable @@ -539,7 +540,7 @@ def _xml_phase(self, test): if self._input_dir is not None: case.set_value("DIN_LOC_ROOT", self._input_dir) - return True + return True, "" ########################################################################### def _setup_phase(self, test): @@ -548,8 +549,9 @@ def _setup_phase(self, test): rv = self._shell_cmd_for_phase(test, "./case.setup", SETUP_PHASE, from_dir=test_dir) # It's OK for this command to fail with baseline diffs but not catastrophically - cmdstat, output, _ = run_cmd("./case.cmpgen_namelists", combine_output=True, from_dir=test_dir) - expect(cmdstat in [0, TESTS_FAILED_ERR_CODE], "Fatal error in case.cmpgen_namelists: %s" % output) + if rv[0]: + cmdstat, output, _ = run_cmd("./case.cmpgen_namelists", combine_output=True, from_dir=test_dir) + expect(cmdstat in [0, TESTS_FAILED_ERR_CODE], "Fatal error in case.cmpgen_namelists: %s" % output) return rv @@ -583,11 +585,10 @@ def _run_catch_exceptions(self, test, phase, run): return run(test) except (SystemExit, Exception) as e: exc_tb = sys.exc_info()[2] - errput = "Test '%s' failed in phase '%s' with exception '%s'" % (test, phase, str(e)) + errput = "Test '%s' failed in phase '%s' with exception '%s'\n" % (test, phase, str(e)) + errput += traceback.format_tb(exc_tb) self._log_output(test, errput) - logger.warning("Caught exception: %s" % str(e)) - traceback.print_tb(exc_tb) - return False + return False, errput ########################################################################### def _get_procs_needed(self, test, phase, threads_in_flight=None, no_batch=False): @@ -644,7 +645,7 @@ def _update_test_status_file(self, test, test_phase, status): def _consumer(self, test, test_phase, phase_method): ########################################################################### before_time = time.time() - success = self._run_catch_exceptions(test, test_phase, phase_method) + success, errors = self._run_catch_exceptions(test, test_phase, phase_method) elapsed_time = time.time() - before_time status = (TEST_PEND_STATUS if test_phase == RUN_PHASE and not \ self._no_batch else TEST_PASS_STATUS) if success else TEST_FAIL_STATUS @@ -662,7 +663,9 @@ def _consumer(self, test, test_phase, phase_method): (test_phase, test, elapsed_time, status) if not success: - status_str += " Case dir: %s" % self._get_test_dir(test) + status_str += "\n Case dir: %s\n" % self._get_test_dir(test) + status_str += " Errors were:\n %s\n" % "\n ".join(errors.splitlines()) + logger.info(status_str) if test_phase in [CREATE_NEWCASE_PHASE, XML_PHASE]: From cedf8f050ea0b109ff068b52e1c0fb5c9015db5e 
Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 18 Apr 2017 11:36:26 -0600 Subject: [PATCH 067/219] Add better test documentation --- config/config_tests.xml | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/config/config_tests.xml b/config/config_tests.xml index ab0dadea2cef..5a3c8d1133d4 100644 --- a/config/config_tests.xml +++ b/config/config_tests.xml @@ -166,6 +166,45 @@ PRE pause-resume test: by default a BFB test of pause-resume cycling LII CLM initial condition interpolation test +====================================================================== + Infrastructural tests for CIME. These are used by scripts_regression_tests. + Users won't generally run these. +====================================================================== + + +TESTBUILDFAIL Insta-fail build step. Used to confirm that failed + builds are caught and reported correctly. + +TESTBUILDFAILEXC Insta-fail build step by failing to init. Used to test + correct behavior when exceptions are generated. + +TESTRUNFAIL Insta-fail run step. Used to confirm that model run + failures are caught and reported correctly. + +TESTRUNFAILEXC Insta-fail run step via exception. Used to test correct + correct behavior when exceptions are generated. + +TESTRUNPASS Insta-pass run step. Used to test that run that work + are reported correctly. + +TESTMEMLEAKFAIL Insta-fail memleak step. Used to test that memleaks are + detected and reported correctly. + +TESTMEMLEAKPASS Insta-pass memleak step. Used to test that non-memleaks are + reported correctly. + +TESTRUNDIFF Produces a canned hist file. Env var TESTRUNDIFF_ALTERNATE can + be used to cause a DIFF. Used to check that baseline diffs are + detected and reported correctly. + +TESTTESTDIFF Simulates internal test diff (non baseline). Used to check that + internal comparison failures are detected and reported correctly. + +TESTRUNSLOWPASS After 5 minutes of sleep, pass run step. Used to test timeouts + and kills. + +NODEFAIL Tests restart upon detected node failure. Generates fake failures, + the number of which is controlled by NODEFAIL_NUM_FAILS. 
--> From 81435ace9822dd6ef1008c535d8c3e90dde0050e Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Tue, 18 Apr 2017 12:37:31 -0600 Subject: [PATCH 068/219] pio fixes for fortran unit tests --- src/share/unit_test_stubs/pio/pio.F90.in | 15 ++++++++++++++- src/share/util/shr_pio_mod.F90 | 13 +++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/share/unit_test_stubs/pio/pio.F90.in b/src/share/unit_test_stubs/pio/pio.F90.in index 9aafa3550dc6..a855889396bd 100644 --- a/src/share/unit_test_stubs/pio/pio.F90.in +++ b/src/share/unit_test_stubs/pio/pio.F90.in @@ -87,8 +87,9 @@ module pio integer,parameter,public :: PIO_rearr_comm_fc_1d_io2comp = 2 integer,parameter,public :: PIO_rearr_comm_fc_2d_disable = 3 integer, public, parameter :: PIO_REARR_COMM_UNLIMITED_PEND_REQ = -1 + integer, public, parameter :: PIO_NOERR=0 - + public :: PIO_set_rearr_opts public :: PIO_def_dim public :: PIO_enddef public :: PIO_FILE_IS_OPEN @@ -273,6 +274,18 @@ contains type (io_desc_t) :: iodesc end subroutine freedecomp_file + integer function PIO_set_rearr_opts(iosystem, comm_type, fcd,& + enable_hs_c2i, enable_isend_c2i,& + max_pend_req_c2i,& + enable_hs_i2c, enable_isend_i2c,& + max_pend_req_i2c) result(ierr) + type (iosystem_desc_t), intent(inout) :: iosystem + integer, intent(in) :: comm_type, fcd + logical, intent(in) :: enable_hs_c2i, enable_hs_i2c + logical, intent(in) :: enable_isend_c2i, enable_isend_i2c + integer, intent(in) :: max_pend_req_c2i, max_pend_req_i2c + end function PIO_set_rearr_opts + integer function get_att_desc_{TYPE} (File,varDesc,name,value) result(ierr) type (File_desc_t), intent(inout) , target :: File type (VAR_desc_t), intent(in) :: varDesc diff --git a/src/share/util/shr_pio_mod.F90 b/src/share/util/shr_pio_mod.F90 index 2c11bb9b98e1..d4aa4b0dcff5 100644 --- a/src/share/util/shr_pio_mod.F90 +++ b/src/share/util/shr_pio_mod.F90 @@ -428,6 +428,19 @@ subroutine shr_pio_read_default_namelist(nlfilename, Comm, pio_stride, pio_root, pio_async_interface = .false. ! pio tasks are a subset of component tasks pio_rearranger = PIO_REARR_SUBSET + pio_rearr_comm_type = 'p2p' + pio_rearr_comm_fcd = '2denable' + pio_rearr_comm_max_pend_req_comp2io = 0 + pio_rearr_comm_enable_hs_comp2io = .true. + pio_rearr_comm_enable_isend_comp2io = .false. + pio_rearr_comm_max_pend_req_io2comp = 0 + pio_rearr_comm_enable_hs_io2comp = .true. + pio_rearr_comm_enable_isend_io2comp = .false. 
+ + + + + if(iamroot) then unitn=shr_file_getunit() open( unitn, file=trim(nlfilename), status='old' , iostat=ierr) From 68a0556f1bf44fb8e9fcf26ec66d10465c08da7a Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Tue, 18 Apr 2017 15:29:03 -0600 Subject: [PATCH 069/219] Add a stub for seq_timemgr_mod.F90 Fortran unit tests now pass on my mac --- .../mct/unit_test/stubs/CMakeLists.txt | 1 + .../mct/unit_test/stubs/seq_timemgr_mod.F90 | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 diff --git a/src/drivers/mct/unit_test/stubs/CMakeLists.txt b/src/drivers/mct/unit_test/stubs/CMakeLists.txt index 4e0db12da1a9..a3097917ed99 100644 --- a/src/drivers/mct/unit_test/stubs/CMakeLists.txt +++ b/src/drivers/mct/unit_test/stubs/CMakeLists.txt @@ -1,4 +1,5 @@ list(APPEND drv_sources + seq_timemgr_mod.F90 vertical_gradient_calculator_constant.F90 ) diff --git a/src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 b/src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 new file mode 100644 index 000000000000..f88a96d2da8c --- /dev/null +++ b/src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 @@ -0,0 +1,19 @@ +module seq_timemgr_mod + + ! Stub for routines from seq_timemgr_mod that are needed by other modules built by the + ! unit tests. + + implicit none + private + + public :: seq_timemgr_pause_active + +contains + + logical function seq_timemgr_pause_active() + ! Stub for seq_timemgr_pause_active - always returns .false. + + seq_timemgr_pause_active = .false. + end function seq_timemgr_pause_active + +end module seq_timemgr_mod From 19f35f724a4d0776511f99187d1ac71e9e2b389a Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Tue, 18 Apr 2017 22:02:33 -0600 Subject: [PATCH 070/219] Get Fortran unit tests running on cheyenne I'm able to build and run the unit tests on cheyenne now by getting an interactive job on the share queue: qsub -I -l select=1:ncpus=1:mpiprocs=1 -l walltime=01:00:00 -q share -A P93300601 and then running the command in README.unit_testing: tools/unit_testing/run_tests.py --build-dir `mktemp -d ./unit_tests.XXXXXXXX` A couple of notes: - I pass unit_testing as an argument to the EnvMachSpecific constructor to avoid having to pass it all over between various methods; this also keeps clients simpler, I think. However, this could be made an argument to the relevant methods, if need be (similarly to compiler, debug and mpilib) - changes in run_tests.py: mostly moved ordering of things: need to load the environment before getting the resolved value of mpirun_command. Also added: os.environ["UNIT_TEST_HOST"] = socket.gethostname() --- config/cesm/machines/config_compilers.xml | 1 + config/cesm/machines/config_machines.xml | 14 ++++++++++++ config/xml_schemas/config_machines.xsd | 1 + scripts/lib/CIME/BuildTools/configure.py | 12 ++++++---- scripts/lib/CIME/XML/env_mach_specific.py | 8 ++++++- src/externals/CMake/pFUnit_utils.cmake | 3 ++- tools/unit_testing/run_tests.py | 28 +++++++++++++---------- 7 files changed, 49 insertions(+), 18 deletions(-) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 2c0848f1d519..3a5a0d0471b8 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -992,6 +992,7 @@ using a fortran linker. 
-DPIO_ENABLE_LOGGING=ON + $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP FALSE diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index f07a9ee7942a..03a0b14660c0 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -223,6 +223,17 @@ omplace + + + + /opt/sgi/mpt/mpt-2.15/bin/mpirun $ENV{UNIT_TEST_HOST} -np 1 + @@ -262,6 +273,9 @@ 256M 16 + + false + diff --git a/config/xml_schemas/config_machines.xsd b/config/xml_schemas/config_machines.xsd index ee31239c4c04..bad0fb386dc6 100644 --- a/config/xml_schemas/config_machines.xsd +++ b/config/xml_schemas/config_machines.xsd @@ -188,6 +188,7 @@ + diff --git a/scripts/lib/CIME/BuildTools/configure.py b/scripts/lib/CIME/BuildTools/configure.py index b610b087aa3c..26281117df62 100644 --- a/scripts/lib/CIME/BuildTools/configure.py +++ b/scripts/lib/CIME/BuildTools/configure.py @@ -23,7 +23,8 @@ logger = logging.getLogger(__name__) -def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos): +def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, + sysos, unit_testing=False): """Add Macros, Depends, and env_mach_specific files to a directory. Arguments: @@ -34,6 +35,8 @@ def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos compiler - String containing the compiler vendor to configure for. mpilib - String containing the MPI implementation to configure for. debug - Boolean specifying whether debugging options are enabled. + unit_testing - Boolean specifying whether we're running unit tests (as + opposed to a system run) """ # Macros generation. suffixes = {'Makefile': 'make', 'CMake': 'cmake'} @@ -44,7 +47,7 @@ def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler) _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, - debug, sysos) + debug, sysos, unit_testing) def _copy_depends_files(machine_name, machines_dir, output_dir, compiler): """ @@ -64,7 +67,8 @@ def _copy_depends_files(machine_name, machines_dir, output_dir, compiler): shutil.copyfile(dfile, outputdfile) -def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos): +def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, + sysos, unit_testing): """ env_mach_specific generation. 
""" @@ -72,7 +76,7 @@ def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sy if os.path.exists(ems_path): logger.warn("%s already exists, delete to replace"%ems_path) return - ems_file = EnvMachSpecific(output_dir) + ems_file = EnvMachSpecific(output_dir, unit_testing=unit_testing) ems_file.populate(machobj) ems_file.write() for shell in ('sh', 'csh'): diff --git a/scripts/lib/CIME/XML/env_mach_specific.py b/scripts/lib/CIME/XML/env_mach_specific.py index b3efbb54bba9..226f199c377e 100644 --- a/scripts/lib/CIME/XML/env_mach_specific.py +++ b/scripts/lib/CIME/XML/env_mach_specific.py @@ -13,12 +13,14 @@ # get_type) otherwise need to implement own functions and make GenericXML parent class class EnvMachSpecific(EnvBase): # pylint: disable=unused-argument - def __init__(self, caseroot, infile="env_mach_specific.xml",components=None): + def __init__(self, caseroot, infile="env_mach_specific.xml", + components=None, unit_testing=False): """ initialize an object interface to file env_mach_specific.xml in the case directory """ fullpath = infile if os.path.isabs(infile) else os.path.join(caseroot, infile) EnvBase.__init__(self, caseroot, fullpath) + self._unit_testing = unit_testing def populate(self, machobj): """Add entries to the file using information from a Machines object.""" @@ -191,6 +193,10 @@ def _match_attribs(self, attribs, compiler, debug, mpilib): elif ("debug" in attribs and not self._match("TRUE" if debug else "FALSE", attribs["debug"].upper())): return False + elif ("unit_testing" in attribs and + not self._match("TRUE" if self._unit_testing else "FALSE", + attribs["unit_testing"].upper())): + return False return True diff --git a/src/externals/CMake/pFUnit_utils.cmake b/src/externals/CMake/pFUnit_utils.cmake index 0089e4989f42..2accabe20e14 100644 --- a/src/externals/CMake/pFUnit_utils.cmake +++ b/src/externals/CMake/pFUnit_utils.cmake @@ -215,7 +215,8 @@ function(create_pFUnit_test test_name executable_name pf_file_list fortran_sourc endif() # Prefix command with an mpirun command - set (MY_COMMAND ${PFUNIT_MPIRUN} ${MY_COMMAND}) + separate_arguments(PFUNIT_MPIRUN_LIST UNIX_COMMAND ${PFUNIT_MPIRUN}) + set (MY_COMMAND ${PFUNIT_MPIRUN_LIST} ${MY_COMMAND}) # Do the work add_pFUnit_executable(${executable_name} "${pf_file_list}" diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index e7a152fe2009..cbee91a8a5be 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -13,6 +13,7 @@ from CIME.XML.env_mach_specific import EnvMachSpecific from xml_test_list import TestSuiteSpec, suites_from_xml import subprocess +import socket #================================================= # Standard library modules. 
#================================================= @@ -277,8 +278,20 @@ def _main(): # Create the environment, and the Macros.cmake file # # - configure(machobj, build_dir, ["CMake"], compiler, mpilib, debug, os_) - machspecific = EnvMachSpecific(build_dir) + configure(machobj, build_dir, ["CMake"], compiler, mpilib, debug, os_, + unit_testing=True) + machspecific = EnvMachSpecific(build_dir, unit_testing=True) + + machspecific.load_env(compiler, debug, mpilib) + os.environ["OS"] = os_ + os.environ["COMPILER"] = compiler + os.environ["DEBUG"] = stringify_bool(debug) + os.environ["MPILIB"] = mpilib + if use_openmp: + os.environ["compile_threaded"] = "true" + os.environ["CC"] = find_executable("mpicc") + os.environ["FC"] = find_executable("mpif90") + os.environ["UNIT_TEST_HOST"] = socket.gethostname() if no_mpirun: mpirun_command = "" @@ -292,18 +305,9 @@ def _main(): # We can get away with specifying case=None since we're using exe_only=True mpirun_command, _ = machspecific.get_mpirun(case=None, attribs=mpi_attribs, exe_only=True) + mpirun_command = machspecific.get_resolved_value(mpirun_command) logger.warn("mpirun command is '%s'"%mpirun_command) - machspecific.load_env(compiler, debug, mpilib) - os.environ["OS"] = os_ - os.environ["COMPILER"] = compiler - os.environ["DEBUG"] = stringify_bool(debug) - os.environ["MPILIB"] = mpilib - if use_openmp: - os.environ["compile_threaded"] = "true" - os.environ["CC"] = find_executable("mpicc") - os.environ["FC"] = find_executable("mpif90") - #================================================= # Run tests. #================================================= From 0eb29e8e6fd0fc3c42573dcc04ec666204a4eb69 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Tue, 18 Apr 2017 22:27:38 -0600 Subject: [PATCH 071/219] Generalize logic for when we run N_TestUnitTest Rather than hard-coding that this is only run on yellowstone-intel, instead run this on any machine for which PFUNIT_PATH is defined for the default compiler. 
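For illustration, the new criterion can be checked by hand with a short Python
sketch (not part of this change; it assumes CIME's scripts/lib is importable and
that Machines() with no arguments resolves to the current machine, the way the
MACHINE object does in scripts_regression_tests.py):

    from CIME.XML.machines import Machines
    from CIME.XML.compilers import Compilers

    mach = Machines()                       # current machine (assumed no-arg behavior)
    compilers = Compilers(mach, compiler=mach.get_default_compiler())
    # Same lookup the test now performs: no PFUNIT_PATH node means the test is skipped
    if compilers.get_optional_compiler_node("PFUNIT_PATH") is None:
        print("Fortran unit tests would be skipped on this machine")
    else:
        print("Fortran unit tests would run on this machine")

So enabling the test on another machine should just be a matter of providing a
PFUNIT_PATH entry for its default compiler in config_compilers.xml.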
--- scripts/tests/scripts_regression_tests.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index d084e1dd6120..9f5d9163f09c 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -178,13 +178,19 @@ def setUpClass(cls): cls._testroot = os.path.join(TEST_ROOT, 'TestUnitTests') cls._testdirs = [] + def _has_unit_test_support(self): + default_compiler = MACHINE.get_default_compiler() + compiler = Compilers(MACHINE, compiler=default_compiler) + pfunit_path = compiler.get_optional_compiler_node("PFUNIT_PATH") + if pfunit_path is None: + return False + else: + return True + def test_a_unit_test(self): cls = self.__class__ - machine = MACHINE.get_machine_name() - compiler = MACHINE.get_default_compiler() - if (machine != "yellowstone" or compiler != "intel"): - #TODO: get rid of this restriction - self.skipTest("Skipping TestUnitTest - only supported on yellowstone with intel") + if not self._has_unit_test_support(): + self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine") test_dir = os.path.join(cls._testroot,"unit_tester_test") cls._testdirs.append(test_dir) os.makedirs(test_dir) @@ -199,11 +205,8 @@ def test_b_cime_f90_unit_tests(self): if (FAST_ONLY): self.skipTest("Skipping slow test") - machine = MACHINE.get_machine_name() - compiler = MACHINE.get_default_compiler() - if (machine != "yellowstone" or compiler != "intel"): - #TODO: get rid of this restriction - self.skipTest("Skipping TestUnitTest - only supported on yellowstone with intel") + if not self._has_unit_test_support(): + self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine") test_dir = os.path.join(cls._testroot,"driver_f90_tests") cls._testdirs.append(test_dir) From 085fbfd477e5af71c293b21fcf7699bc4ee25238 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 19 Apr 2017 08:02:30 -0600 Subject: [PATCH 072/219] move a comment --- config/cesm/machines/config_machines.xml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index 03a0b14660c0..58b17abb6907 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -227,11 +227,6 @@ - /opt/sgi/mpt/mpt-2.15/bin/mpirun $ENV{UNIT_TEST_HOST} -np 1 @@ -274,6 +269,9 @@ 16 + false From 4c5565c040acf4fbfa4d8c469510e64514eee949 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 19 Apr 2017 08:34:09 -0600 Subject: [PATCH 073/219] Change how we determine the compiler I think this method will work better once we want to select between a parallel and serial compiler --- tools/unit_testing/run_tests.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index cbee91a8a5be..ec8d40030fdb 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -10,6 +10,7 @@ from CIME.BuildTools.configure import configure from CIME.utils import run_cmd_no_fail, stringify_bool from CIME.XML.machines import Machines +from CIME.XML.compilers import Compilers from CIME.XML.env_mach_specific import EnvMachSpecific from xml_test_list import TestSuiteSpec, suites_from_xml import subprocess @@ -272,6 +273,8 @@ def _main(): compiler = machobj.get_default_compiler() logger.warn("Compiler 
is %s"%compiler) + compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib) + debug = not build_optimized os_ = machobj.get_value("OS") @@ -289,8 +292,8 @@ def _main(): os.environ["MPILIB"] = mpilib if use_openmp: os.environ["compile_threaded"] = "true" - os.environ["CC"] = find_executable("mpicc") - os.environ["FC"] = find_executable("mpif90") + os.environ["CC"] = compilerobj.get_value('MPICC') + os.environ["FC"] = compilerobj.get_value('MPIFC') os.environ["UNIT_TEST_HOST"] = socket.gethostname() if no_mpirun: From 046ea2210919b292dce6af520516985d63e51e12 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 19 Apr 2017 12:11:00 -0600 Subject: [PATCH 074/219] Get unit test build & run working with either serial or mpi pfunit The default is now to do a serial build, with no mpi or openmp support. This can be changed via the --use-mpi and --use-openmp flags. I have tested this on my mac. This required making the following change in my config_compilers.xml: - /usr/local/pfunit/pfunit3.2.8-serial + /usr/local/pfunit/pfunit3.2.8-serial + /usr/local/pfunit/pfunit-mpi I could then build & run in serial mode with tools/unit_testing/run_tests.py --build-dir `mktemp -d ./unit_tests.XXXXXXXX` or in mpi mode with tools/unit_testing/run_tests.py --use-mpi --use-openmp --build-dir `mktemp -d ./unit_tests.XXXXXXXX` --- CMakeLists.txt | 28 +++++- src/drivers/mct/unit_test/CMakeLists.txt | 8 ++ tools/unit_testing/run_tests.py | 107 ++++++++++++++++------- 3 files changed, 107 insertions(+), 36 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5ed6ad8a06f5..8fbb1b9f63ff 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,14 +21,21 @@ include_directories(${NetCDF_C_INCLUDE_DIRS} ${NetCDF_Fortran_INCLUDE_DIRS}) # ------------------------------------------------------------------------ set(MCT_ROOT "${CIME_ROOT}/src/externals/mct") +if (USE_MPI_SERIAL) + set(ENABLE_MPI_SERIAL "--enable-mpiserial") +else() + set(ENABLE_MPI_SERIAL "") +endif() + ExternalProject_add(mct_project PREFIX ${CMAKE_CURRENT_BINARY_DIR} SOURCE_DIR ${MCT_ROOT} BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/mct - CONFIGURE_COMMAND ${MCT_ROOT}/configure --enable-debugging --prefix=${CMAKE_CURRENT_BINARY_DIR} CFLAGS=${CFLAGS} FCFLAGS=${FFLAGS} SRCDIR=${MCT_ROOT} DEBUG="-g" - BUILD_COMMAND $(MAKE) + CONFIGURE_COMMAND ${MCT_ROOT}/configure ${ENABLE_MPI_SERIAL} --enable-debugging --prefix=${CMAKE_CURRENT_BINARY_DIR} CFLAGS=${CFLAGS} FCFLAGS=${FFLAGS} SRCDIR=${MCT_ROOT} DEBUG="-g" + BUILD_COMMAND $(MAKE) SRCDIR=${MCT_ROOT} # Leave things in rather than "installing", because we have - # no need to move things around inside of the CMake binary directory. + # no need to move things around inside of the CMake binary directory. 
Also, + # mpi-serial doesn't install properly in the out-of-source build INSTALL_COMMAND : ) # This copy_makefiles step is needed because mct currently doesn't support an @@ -43,12 +50,27 @@ ExternalProject_add_step(mct_project copy_makefiles COMMAND mkdir -p mpeu COMMAND cp -p /mpeu/Makefile mpeu/ ) +if (USE_MPI_SERIAL) + ExternalProject_add_step(mct_project copy_mpi_serial_files + DEPENDEES configure + DEPENDERS build + WORKING_DIRECTORY + COMMAND mkdir -p mpi-serial + COMMAND cp -p /mpi-serial/Makefile mpi-serial/ + COMMAND cp /mpi-serial/mpif.h mpi-serial/ + COMMAND cp /mpi-serial/mpi.h mpi-serial/ + ) +endif() # Tell cmake to look for libraries & mod files here, because this is where we built libraries include_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mct) include_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mpeu) link_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mct) link_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mpeu) +if (USE_MPI_SERIAL) + include_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mpi-serial) + link_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mpi-serial) +endif() # ------------------------------------------------------------------------ # Done MCT build diff --git a/src/drivers/mct/unit_test/CMakeLists.txt b/src/drivers/mct/unit_test/CMakeLists.txt index 82603c37bfd4..a747f9e23a2b 100644 --- a/src/drivers/mct/unit_test/CMakeLists.txt +++ b/src/drivers/mct/unit_test/CMakeLists.txt @@ -11,6 +11,11 @@ add_definitions( -DNUM_COMP_INST_ESP=1 ) +# The following definitions are needed when building with the mpi-serial library +if (USE_MPI_SERIAL) + add_definitions(-DNO_MPI2 -DNO_MPIMOD) +endif() + # Add source directories from stubs. This should be done first, so that in the # case of name collisions, the drv versions take precedence (when there are two # files with the same name, the one added later wins). @@ -46,6 +51,9 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}) # tests need all of these libraries, but it's easiest just to set the same list # for everyone. set(DRV_UNIT_TEST_LIBS drv;csm_share;esmf_wrf_timemgr;mct;mpeu) +if (USE_MPI_SERIAL) + list(APPEND DRV_UNIT_TEST_LIBS mpi-serial) +endif() list(APPEND DRV_UNIT_TEST_LIBS ${NETCDF_LIBRARIES}) # Add the test directories diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index ec8d40030fdb..2d594e19482b 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -8,7 +8,7 @@ from standard_script_setup import * from CIME.BuildTools.configure import configure -from CIME.utils import run_cmd_no_fail, stringify_bool +from CIME.utils import run_cmd_no_fail, stringify_bool, expect from CIME.XML.machines import Machines from CIME.XML.compilers import Compilers from CIME.XML.env_mach_specific import EnvMachSpecific @@ -83,21 +83,24 @@ def parse_command_line(args): help="""Number of processes to use for build.""" ) + parser.add_argument("--use-mpi", action="store_true", + help="""If specified, run unit tests with an mpi-enabled version + of pFUnit, via mpirun. (Default is to use a serial build without + mpirun.) This requires a pFUnit build with MPI support.""") + parser.add_argument("--mpilib", - help="""MPI Library to use in build. - If not specified, use the default for this machine/compiler. - Must match an MPILIB option in config_compilers.xml. - e.g., for yellowstone, can use 'mpich2'.""" + help="""MPI Library to use in build. + If not specified, use the default for this machine/compiler. + Must match an MPILIB option in config_compilers.xml. 
+ e.g., for yellowstone, can use 'mpich2'. + Only relevant if --use-mpi is specified.""" ) - parser.add_argument("--no-mpirun", action="store_true", - help="""If specified, then run executables without an mpirun command""") - parser.add_argument("--mpirun-command", - help="""Command to use to run an MPI executable. - If not specified, uses the default for this machine. - Ignored if --no-mpirun is specified.""" - ) + help="""Command to use to run an MPI executable. + If not specified, uses the default for this machine. + Only relevant if --use-mpi is specified.""" + ) parser.add_argument( "--test-spec-dir", default=".", help="""Location where tests are specified. @@ -116,8 +119,10 @@ def parse_command_line(args): override the command provided by Machines.""" ) parser.add_argument( - "--no-openmp", action="store_true", - help="""Default is to include OPENMP support for tests; this option excludes openmp""" + "--use-openmp", action="store_true", + help="""If specified, include OpenMP support for tests. + (Default is to run without OpenMP support.) This requires a pFUnit build with + OpenMP support.""" ) parser.add_argument( "--xml-test-list", @@ -138,16 +143,19 @@ def parse_command_line(args): return output, args.build_dir, args.build_optimized, args.clean,\ args.cmake_args, args.compiler, args.enable_genf90, args.machine, args.machines_dir,\ - args.make_j, args.mpilib, args.no_mpirun, args.mpirun_command, args.test_spec_dir, args.ctest_args,\ - args.no_openmp, args.xml_test_list, args.verbose + args.make_j, args.use_mpi, args.mpilib, args.mpirun_command, args.test_spec_dir, args.ctest_args,\ + args.use_openmp, args.xml_test_list, args.verbose -def cmake_stage(name, test_spec_dir, build_optimized, mpirun_command, output, cmake_args=None, clean=False, verbose=False, enable_genf90=True, color=True): +def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_command, output, + cmake_args=None, clean=False, verbose=False, enable_genf90=True, color=True): """Run cmake in the current working directory. Arguments: name - Name for output messages. test_spec_dir - Test specification directory to run CMake on. + use_mpiserial (logical) - If True, we'll tell CMake to include mpi-serial for tests + that need it build_optimized (logical) - If True, we'll build in optimized rather than debug mode """ # Clear CMake cache. @@ -176,6 +184,8 @@ def cmake_stage(name, test_spec_dir, build_optimized, mpirun_command, output, cm "-DCMAKE_BUILD_TYPE="+build_type, "-DPFUNIT_MPIRUN='"+mpirun_command+"'", ] + if use_mpiserial: + cmake_command.append("-DUSE_MPI_SERIAL=ON") if verbose: cmake_command.append("-Wdev") @@ -213,6 +223,26 @@ def make_stage(name, output, make_j, clean=False, verbose=True): run_cmd_no_fail(" ".join(make_command), arg_stdout=None, arg_stderr=subprocess.STDOUT) +def find_pfunit(compilerobj, mpilib, use_openmp): + """Find the pfunit installation we'll be using, and print its path + + Aborts if necessary information cannot be found. + + Args: + - compilerobj: Object of type Compilers + - mpilib: String giving the mpi library we're using + - use_openmp: Boolean + """ + attrs = {"MPILIB": mpilib, + "compile_threaded": "true" if use_openmp else "false" + } + + pfunit_path = compilerobj.get_optional_compiler_node("PFUNIT_PATH", attributes=attrs) + expect(pfunit_path is not None, + """PFUNIT_PATH not found for this machine and compiler, with MPILIB=%s and compile_threaded=%s. 
+You must specify PFUNIT_PATH in config_compilers.xml, with attributes MPILIB and compile_threaded."""%(mpilib, attrs['compile_threaded'])) + logger.info("Using PFUNIT_PATH: %s"%pfunit_path.text) + #================================================= # Iterate over input suite specs, building the tests. #================================================= @@ -221,12 +251,10 @@ def make_stage(name, output, make_j, clean=False, verbose=True): def _main(): output, build_dir, build_optimized, clean,\ cmake_args, compiler, enable_genf90, machine, machines_dir,\ - make_j, mpilib, no_mpirun, mpirun_command, test_spec_dir, ctest_args,\ - no_openmp, xml_test_list, verbose \ + make_j, use_mpi, mpilib, mpirun_command, test_spec_dir, ctest_args,\ + use_openmp, xml_test_list, verbose \ = parse_command_line(sys.argv) - use_openmp = not no_openmp - #================================================= # Find directory and file paths. #================================================= @@ -254,7 +282,7 @@ def _main(): else: machobj = Machines(machine=machine) -# Create build directory if necessary. + # Create build directory if necessary. build_dir = os.path.abspath(build_dir) if not os.path.isdir(build_dir): @@ -263,18 +291,24 @@ def _main(): # Switch to the build directory. os.chdir(build_dir) -#================================================= -# Functions to perform various stages of build. -#================================================= - if mpilib is None: + #================================================= + # Functions to perform various stages of build. + #================================================= + + if not use_mpi: + mpilib = "mpi-serial" + elif mpilib is None: mpilib = machobj.get_default_MPIlib() - logger.warn("Using mpilib: %s"%mpilib) + logger.info("Using mpilib: %s"%mpilib) + if compiler is None: compiler = machobj.get_default_compiler() - logger.warn("Compiler is %s"%compiler) + logger.info("Compiler is %s"%compiler) compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib) + find_pfunit(compilerobj, mpilib=mpilib, use_openmp=use_openmp) + debug = not build_optimized os_ = machobj.get_value("OS") @@ -292,11 +326,17 @@ def _main(): os.environ["MPILIB"] = mpilib if use_openmp: os.environ["compile_threaded"] = "true" - os.environ["CC"] = compilerobj.get_value('MPICC') - os.environ["FC"] = compilerobj.get_value('MPIFC') + else: + os.environ["compile_threaded"] = "false" + if use_mpi: + os.environ["CC"] = compilerobj.get_value('MPICC') + os.environ["FC"] = compilerobj.get_value('MPIFC') + else: + os.environ["CC"] = compilerobj.get_value('SCC') + os.environ["FC"] = compilerobj.get_value('SFC') os.environ["UNIT_TEST_HOST"] = socket.gethostname() - if no_mpirun: + if not use_mpi: mpirun_command = "" elif mpirun_command is None: mpi_attribs = { @@ -309,7 +349,7 @@ def _main(): # We can get away with specifying case=None since we're using exe_only=True mpirun_command, _ = machspecific.get_mpirun(case=None, attribs=mpi_attribs, exe_only=True) mpirun_command = machspecific.get_resolved_value(mpirun_command) - logger.warn("mpirun command is '%s'"%mpirun_command) + logger.info("mpirun command is '%s'"%mpirun_command) #================================================= # Run tests. 
@@ -335,7 +375,8 @@ def _main(): if not os.path.islink("Macros.cmake"): os.symlink(os.path.join(build_dir,"Macros.cmake"), "Macros.cmake") - cmake_stage(name, directory, build_optimized, mpirun_command, output, verbose=verbose, + use_mpiserial = not use_mpi + cmake_stage(name, directory, build_optimized, use_mpiserial, mpirun_command, output, verbose=verbose, enable_genf90=enable_genf90, cmake_args=cmake_args) make_stage(name, output, make_j, clean=clean, verbose=verbose) From 38e08f38ad9ce43d6d6e0a46aa39458fe2146a3c Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 19 Apr 2017 12:36:03 -0600 Subject: [PATCH 075/219] add illegal char : to check_name test --- scripts/lib/CIME/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index d4c847c628e5..a3f7da17ddae 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -45,7 +45,7 @@ def check_name(fullname, additional_chars=None, fullpath=False): True """ - chars = '<>/{}[\]~`@' # pylint: disable=anomalous-backslash-in-string + chars = '<>/{}[\]~`@:' # pylint: disable=anomalous-backslash-in-string if additional_chars is not None: chars += additional_chars if fullpath: From 29c29f3bb9e118146577c142820eef17d930415f Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 19 Apr 2017 13:45:48 -0600 Subject: [PATCH 076/219] fix check for PFUNIT_PATH --- scripts/tests/scripts_regression_tests.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 9f5d9163f09c..1d759cab0ef8 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -181,7 +181,9 @@ def setUpClass(cls): def _has_unit_test_support(self): default_compiler = MACHINE.get_default_compiler() compiler = Compilers(MACHINE, compiler=default_compiler) - pfunit_path = compiler.get_optional_compiler_node("PFUNIT_PATH") + attrs = {'MPILIB': 'mpi-serial', 'compile_threaded': 'false'} + pfunit_path = compiler.get_optional_compiler_node("PFUNIT_PATH", + attributes=attrs) if pfunit_path is None: return False else: From 8991d807aae20252e6b85d45ee243a22660acf1b Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 19 Apr 2017 13:51:19 -0600 Subject: [PATCH 077/219] Fix PFUNIT_PATH xml variables for new code Now need to specify MPILIB and compile_threaded attributes. Also remove references to old pfunit installations in Sean Santos's home directory on various machines, because I don't know the characteristics of these builds (whether they use mpi and/or openmp). --- config/acme/machines/config_compilers.xml | 3 --- .../userdefined_laptop_template/config_compilers.xml | 4 ++-- config/cesm/machines/config_compilers.xml | 8 ++------ .../userdefined_laptop_template/config_compilers.xml | 4 ++-- 4 files changed, 6 insertions(+), 13 deletions(-) diff --git a/config/acme/machines/config_compilers.xml b/config/acme/machines/config_compilers.xml index 920e7d24f814..a339fae03cd2 100644 --- a/config/acme/machines/config_compilers.xml +++ b/config/acme/machines/config_compilers.xml @@ -811,7 +811,6 @@ for mct, etc. -L$(LAPACK_LIBDIR) -Wl,-rpath=$(LAPACK_LIBDIR) \ -L$(BLAS_LIBDIR) -Wl,-rpath=$(BLAS_LIBDIR) - @@ -920,7 +919,6 @@ for mct, etc. 
mpixlf77_r /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ /soft/libraries/pnetcdf/1.6.0/cnk-xl/current/ - /home/santos/pFUnit/pFUnit_IBM /soft/libraries/hdf5/1.8.14/cnk-xl/current/ -L$(NETCDF_PATH)/lib -lnetcdff -lnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib -L$(IBM_MAIN_DIR)/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$(IBM_MAIN_DIR)/xlsmp/bg/3.1/bglib64 -lxlsmp @@ -943,7 +941,6 @@ for mct, etc. mpixlf77_r /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ /soft/libraries/pnetcdf/1.6.0/cnk-xl/current/ - /home/santos/pFUnit/pFUnit_IBM /soft/libraries/hdf5/1.8.14/cnk-xl/current/ -L$(NETCDF_PATH)/lib -lnetcdff -lnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib -L$(IBM_MAIN_DIR)/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$(IBM_MAIN_DIR)/xlsmp/bg/3.1/bglib64 -lxlsmp diff --git a/config/acme/machines/userdefined_laptop_template/config_compilers.xml b/config/acme/machines/userdefined_laptop_template/config_compilers.xml index 03970f6a16b0..f3ecb9dce845 100644 --- a/config/acme/machines/userdefined_laptop_template/config_compilers.xml +++ b/config/acme/machines/userdefined_laptop_template/config_compilers.xml @@ -29,7 +29,7 @@ TRUE /usr/local $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -framework Accelerate - $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e + $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e @@ -57,6 +57,6 @@ TRUE /opt/local $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -framework Accelerate - $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e + $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 3a5a0d0471b8..ef5c89f8e221 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -687,7 +687,6 @@ using a fortran linker. -Wl,-rpath,$ENV{MPI_PATH}/lib -lifcore - /home/santos/pFUnit/pFUnit_Intel_3_0 -mkl=cluster @@ -701,7 +700,6 @@ using a fortran linker. -lpthread - /home/santos/pFUnit/pFUnit_NAG_3_0 @@ -767,7 +765,6 @@ using a fortran linker. /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlc_r /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf2003_r /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ - /home/santos/pFUnit/pFUnit_IBM gpfs /soft/libraries/pnetcdf/1.6.1/cnk-xl/current/ /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlc_r @@ -930,7 +927,6 @@ using a fortran linker. mpixlc_r mpixlf2003_r /soft/libraries/netcdf/4.3.0-f4.2/cnk-xl/V1R2M0-20131211/ - /home/santos/pFUnit/pFUnit_IBM gpfs /soft/libraries/pnetcdf/1.3.1/cnk-xl/current/ mpixlc_r @@ -992,7 +988,7 @@ using a fortran linker. -DPIO_ENABLE_LOGGING=ON - $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP + $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP FALSE @@ -1037,7 +1033,7 @@ using a fortran linker. 
mpiicpc /glade/apps/opt/papi/5.3.0/intel/12.1.5/include/ /glade/apps/opt/papi/5.3.0/intel/12.1.5/lib64 - $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.1_Intel15.0.1_MPI + $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.1_Intel15.0.1_MPI icc ifort diff --git a/config/cesm/machines/userdefined_laptop_template/config_compilers.xml b/config/cesm/machines/userdefined_laptop_template/config_compilers.xml index 03970f6a16b0..f3ecb9dce845 100644 --- a/config/cesm/machines/userdefined_laptop_template/config_compilers.xml +++ b/config/cesm/machines/userdefined_laptop_template/config_compilers.xml @@ -29,7 +29,7 @@ TRUE /usr/local $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -framework Accelerate - $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e + $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e @@ -57,6 +57,6 @@ TRUE /opt/local $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -framework Accelerate - $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e + $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e From 1c5d0cef82c014d7fcc27b6a2af0d3a53deeb791 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 19 Apr 2017 14:38:10 -0600 Subject: [PATCH 078/219] return only aprun args and use executable from xml --- scripts/lib/CIME/aprun.py | 22 +++++++++++----------- scripts/lib/CIME/case.py | 8 ++++---- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/scripts/lib/CIME/aprun.py b/scripts/lib/CIME/aprun.py index 7991f408c007..de6c1539dc58 100644 --- a/scripts/lib/CIME/aprun.py +++ b/scripts/lib/CIME/aprun.py @@ -30,10 +30,10 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, >>> machine = "titan" >>> run_exe = "acme.exe" >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe) - ('aprun -S 4 -n 680 -N 8 -d 2 acme.exe : -S 2 -n 128 -N 4 -d 4 acme.exe ', 117) + (' -S 4 -n 680 -N 8 -d 2 acme.exe : -S 2 -n 128 -N 4 -d 4 acme.exe ', 117) >>> compiler = "intel" >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe) - ('aprun -S 4 -cc numa_node -n 680 -N 8 -d 2 acme.exe : -S 2 -cc numa_node -n 128 -N 4 -d 4 acme.exe ', 117) + (' -S 4 -cc numa_node -n 680 -N 8 -d 2 acme.exe : -S 2 -cc numa_node -n 128 -N 4 -d 4 acme.exe ', 117) """ max_tasks_per_node = 1 if max_tasks_per_node < 1 else max_tasks_per_node @@ -64,8 +64,8 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, maxt[c1] = 1 # Compute task and thread settings for batch commands - tasks_per_node, task_count, thread_count, max_thread_count, total_node_count, aprun = \ - 0, 1, maxt[0], maxt[0], 0, "aprun" + tasks_per_node, task_count, thread_count, max_thread_count, total_node_count, aprun_args = \ + 0, 1, maxt[0], maxt[0], 0, "" for c1 in xrange(1, total_tasks): if maxt[c1] != thread_count: tasks_per_node = min(pes_per_node, max_tasks_per_node / thread_count) @@ -76,11 +76,11 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, task_per_numa = int(math.ceil(tasks_per_node / 2.0)) # Option for Titan if machine == "titan" and tasks_per_node > 1: - aprun += " -S %d" % task_per_numa + aprun_args += " -S %d" % task_per_numa if compiler == "intel": - aprun += " -cc numa_node" + aprun_args += " -cc numa_node" - aprun += " -n %d -N %d -d %d %s :" % (task_count, tasks_per_node, thread_count, run_exe) + aprun_args += " -n %d -N %d -d %d %s :" % (task_count, tasks_per_node, thread_count, run_exe) 
node_count = int(math.ceil(float(task_count) / tasks_per_node)) total_node_count += node_count @@ -105,13 +105,13 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, # Special option for Titan with intel compiler if machine == "titan" and tasks_per_node > 1: - aprun += " -S %d" % task_per_numa + aprun_args += " -S %d" % task_per_numa if compiler == "intel": - aprun += " -cc numa_node" + aprun_args += " -cc numa_node" - aprun += " -n %d -N %d -d %d %s " % (task_count, tasks_per_node, thread_count, run_exe) + aprun_args += " -n %d -N %d -d %d %s " % (task_count, tasks_per_node, thread_count, run_exe) - return aprun, total_node_count + return aprun_args, total_node_count ############################################################################### def get_aprun_cmd_for_case(case, run_exe): diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 34520f90ea3f..a427ef4fb9b3 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -149,7 +149,7 @@ def _initialize_derived_attributes(self): } executable = env_mach_spec.get_mpirun(self, mpi_attribs, job="case.run", exe_only=True)[0] - if executable == "aprun": + if "aprun" in executable: self.num_nodes = get_aprun_cmd_for_case(self, "acme.exe")[1] self.spare_nodes = env_mach_pes.get_spare_nodes(self.num_nodes) self.num_nodes += self.spare_nodes @@ -1119,10 +1119,10 @@ def get_mpirun_cmd(self, job="case.run"): executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) # special case for aprun - if executable == "aprun": - aprun_cmd, num_nodes = get_aprun_cmd_for_case(self, run_exe) + if "aprun" in executable: + aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe) expect(num_nodes == self.num_nodes, "Not using optimized num nodes") - return aprun_cmd + " " + run_misc_suffix + return executable + aprun_args + " " + run_misc_suffix else: mpi_arg_string = " ".join(args.values()) From c04a4cc3c912412acdb9f4ab9b311c445065bfe4 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 19 Apr 2017 15:22:13 -0600 Subject: [PATCH 079/219] simplify netcdf lib args on hobart --- config/cesm/machines/config_compilers.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 3a5a0d0471b8..34c4bed0c308 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -667,7 +667,7 @@ using a fortran linker. mpich $ENV{NETCDF_PATH} - $SHELL{${NETCDF_PATH}/bin/nf-config --flibs} + -L$NETCDF_PATH/lib -lnetcdff -lnetcdf From c0ca617259f00cfd671c0455a68e9f4cb6b184d8 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 19 Apr 2017 16:42:38 -0600 Subject: [PATCH 080/219] Point to serial pfunit builds on yellowstone and cheyenne --- config/cesm/machines/config_compilers.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index ef5c89f8e221..ed0314241569 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -988,6 +988,7 @@ using a fortran linker. -DPIO_ENABLE_LOGGING=ON + $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP FALSE @@ -1033,6 +1034,7 @@ using a fortran linker. 
mpiicpc /glade/apps/opt/papi/5.3.0/intel/12.1.5/include/ /glade/apps/opt/papi/5.3.0/intel/12.1.5/lib64 + $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.1_yellowstone_Intel15.0.1_noMPI_noOpenMP $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.1_Intel15.0.1_MPI icc From ef59e3064109692999d1442e75826a11525f3015 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 19 Apr 2017 16:48:56 -0600 Subject: [PATCH 081/219] Add MPILIB attribute when querying SFC, MPIFC, etc. This is needed for the serial compilers on yellowstone --- tools/unit_testing/run_tests.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index 2d594e19482b..44b63bd69fb4 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -328,12 +328,14 @@ def _main(): os.environ["compile_threaded"] = "true" else: os.environ["compile_threaded"] = "false" + + compiler_attrs = {'MPILIB': mpilib} if use_mpi: - os.environ["CC"] = compilerobj.get_value('MPICC') - os.environ["FC"] = compilerobj.get_value('MPIFC') + os.environ["CC"] = compilerobj.get_value('MPICC', compiler_attrs) + os.environ["FC"] = compilerobj.get_value('MPIFC', compiler_attrs) else: - os.environ["CC"] = compilerobj.get_value('SCC') - os.environ["FC"] = compilerobj.get_value('SFC') + os.environ["CC"] = compilerobj.get_value('SCC', compiler_attrs) + os.environ["FC"] = compilerobj.get_value('SFC', compiler_attrs) os.environ["UNIT_TEST_HOST"] = socket.gethostname() if not use_mpi: From 09a884cc6f604c60b657eafa21bcb6c1539a96b8 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 19 Apr 2017 20:06:42 -0600 Subject: [PATCH 082/219] fix retrieval of compilers --- tools/unit_testing/run_tests.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index 44b63bd69fb4..1bec293b9aac 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -331,11 +331,11 @@ def _main(): compiler_attrs = {'MPILIB': mpilib} if use_mpi: - os.environ["CC"] = compilerobj.get_value('MPICC', compiler_attrs) - os.environ["FC"] = compilerobj.get_value('MPIFC', compiler_attrs) + os.environ["CC"] = compilerobj.get_optional_compiler_node('MPICC', compiler_attrs).text + os.environ["FC"] = compilerobj.get_optional_compiler_node('MPIFC', compiler_attrs).text else: - os.environ["CC"] = compilerobj.get_value('SCC', compiler_attrs) - os.environ["FC"] = compilerobj.get_value('SFC', compiler_attrs) + os.environ["CC"] = compilerobj.get_optional_compiler_node('SCC', compiler_attrs).text + os.environ["FC"] = compilerobj.get_optional_compiler_node('SFC', compiler_attrs).text os.environ["UNIT_TEST_HOST"] = socket.gethostname() if not use_mpi: From a6d926b7d96c2e826656a031dbaf8f596c1a6871 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 06:22:14 -0600 Subject: [PATCH 083/219] Try a more robust method for getting the compiler --- tools/unit_testing/run_tests.py | 39 +++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index 1bec293b9aac..664d856fb70b 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -243,6 +243,35 @@ def find_pfunit(compilerobj, mpilib, use_openmp): You must specify PFUNIT_PATH in config_compilers.xml, with attributes MPILIB and compile_threaded."""%(mpilib, attrs['compile_threaded'])) logger.info("Using PFUNIT_PATH: 
%s"%pfunit_path.text) +def get_compiler(compilerobj, compiler_type, mpilib): + """Get the specific name of the given compiler from config_compilers.xml. + + Aborts if no compiler is found. + + Args: + - compilerobj: Object of type Compilers + - compiler_type: String: MPICC, MPIFC, SCC or SFC + - mpilib: String: mpi library - 'mpi-serial', 'mpich', etc. + """ + + compiler_attrs = {'MPILIB': mpilib} + compiler = compilerobj.get_optional_compiler_node(compiler_type, compiler_attrs) + if compiler is None: + # This can happen if no MPILIB attribute is specified for this + # machine/compiler/compiler_type. So try again without this attribute. + compiler = compilerobj.get_optional_compiler_node(compiler_type) + + expect(compiler is not None, + """Could not find the specific compiler for this machine and compiler, with +compiler type %s and mpilib %s."""%(compiler_type, mpilib)) + + compiler_resolved = compilerobj.get_resolved_value(compiler.text) + if (not os.path.isabs(compiler_resolved)): + # Sometimes we end up with a compiler that is simply 'mpif90'. Turn it + # into a full path here. + compiler_resolved = find_executable(compiler_resolved.strip()) + return compiler_resolved + #================================================= # Iterate over input suite specs, building the tests. #================================================= @@ -331,11 +360,13 @@ def _main(): compiler_attrs = {'MPILIB': mpilib} if use_mpi: - os.environ["CC"] = compilerobj.get_optional_compiler_node('MPICC', compiler_attrs).text - os.environ["FC"] = compilerobj.get_optional_compiler_node('MPIFC', compiler_attrs).text + os.environ["CC"] = get_compiler(compilerobj, 'MPICC', mpilib) + os.environ["FC"] = get_compiler(compilerobj, 'MPIFC', mpilib) else: - os.environ["CC"] = compilerobj.get_optional_compiler_node('SCC', compiler_attrs).text - os.environ["FC"] = compilerobj.get_optional_compiler_node('SFC', compiler_attrs).text + os.environ["CC"] = get_compiler(compilerobj, 'SCC', mpilib) + os.environ["FC"] = get_compiler(compilerobj, 'SFC', mpilib) + logger.info("CC is %s"%os.environ["CC"]) + logger.info("FC is %s"%os.environ["FC"]) os.environ["UNIT_TEST_HOST"] = socket.gethostname() if not use_mpi: From d4351e56853aee3a6cc6b01e2db51d08866072b3 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 06:41:09 -0600 Subject: [PATCH 084/219] Don't set CMAKE_C_COMPILER or CMAKE_Fortran_COMPILER This seems unnecessary, and had a few problems: (1) It assumed the use of MPICC and MPIF90, as opposed to SCC and SFC (2) The timing of the include of CIME_utils comes too late for this setting to have any effect. If we move the include earlier in the top-level CMakeLists.txt file (before the project line), this causes problems, I think with the 'include(Compilers)' line. 
--- src/externals/CMake/CIME_utils.cmake | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/externals/CMake/CIME_utils.cmake b/src/externals/CMake/CIME_utils.cmake index eeca4ae464fd..aba65f877f20 100644 --- a/src/externals/CMake/CIME_utils.cmake +++ b/src/externals/CMake/CIME_utils.cmake @@ -32,8 +32,6 @@ set(CMAKE_COLOR_MAKEFILE "${USE_COLOR}") include(${CMAKE_BINARY_DIR}/Macros.cmake RESULT_VARIABLE FOUND) list(APPEND CMAKE_MODULE_PATH "../pio2/cmake") -set(CMAKE_C_COMPILER ${MPICC}) -set(CMAKE_Fortran_COMPILER ${MPIF90}) set(CMAKE_C_FLAGS "${CPPDEFS} ${CFLAGS}") set(CMAKE_Fortran_FLAGS "${CPPDEFS} ${FFLAGS}") set (CMAKE_EXE_LINKER_FLAGS ${LDFLAGS}) From faf5fe6c2113ec2777b3777807ecc2743ca79906 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 06:50:36 -0600 Subject: [PATCH 085/219] Update README: no longer need a caldera session --- README.unit_testing | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.unit_testing b/README.unit_testing index df92424f99df..168e7ed302a2 100644 --- a/README.unit_testing +++ b/README.unit_testing @@ -1,6 +1,5 @@ -# To run all CIME unit tests on caldera, run the following command: -# (Note that this must be done from an interactive caldera session, not from yellowstone) -# Note also that this requires module load all-python-libs +# To run all the CIME Fortran unit tests, run the following command: +# On yellowstone, this requires module load all-python-libs # # The creation of a temporary directory ensures that you are doing a completely # clean build of the unit tests. (The use of the --clean flag to run_tests.py From 4731fd7d36f37cb4a8b1c5e7c2526ca762a2c47e Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 07:40:06 -0600 Subject: [PATCH 086/219] remove unneeded line --- tools/unit_testing/run_tests.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index 664d856fb70b..91935b9a0a2f 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -358,7 +358,6 @@ def _main(): else: os.environ["compile_threaded"] = "false" - compiler_attrs = {'MPILIB': mpilib} if use_mpi: os.environ["CC"] = get_compiler(compilerobj, 'MPICC', mpilib) os.environ["FC"] = get_compiler(compilerobj, 'MPIFC', mpilib) From 0e051bf8c16bb468ec08caefe709cde25a330dfd Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 07:49:03 -0600 Subject: [PATCH 087/219] Get compiler from the Macros.cmake file This works on roo2 with both serial and mpi. But note that on roo2, I set the full path to SFC and MPIFC. It also works to rerun unit tests out of an existing directory (using either serial or mpi) - only the bare minimum is done. (I was worried that it might not work to rebuild, based on this comment in the Makefile: ) --- CMakeLists.txt | 20 ++++++++++++++- src/externals/CMake/CIME_utils.cmake | 4 --- tools/unit_testing/run_tests.py | 37 ---------------------------- 3 files changed, 19 insertions(+), 42 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8fbb1b9f63ff..3a46a266917b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,6 +2,24 @@ cmake_minimum_required(VERSION 2.8) include(ExternalProject) set(CIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}") +# This block needs to happen before the 'project' line. +# +# FIXME(wjs, 2017-04-20) If we can get this working, we should move it into a +# new file that can be included, like CIME_initial_setup. 
All top-level +# CMakeLists.txt files (i.e., CLM and CAM right now) will need to include this, +# too. +include(${CMAKE_BINARY_DIR}/Macros.cmake RESULT_VARIABLE FOUND) +if(NOT FOUND) + message(FATAL_ERROR "You must generate a Macros.cmake file using CIME's configure") +endif() +if("$ENV{MPILIB}" STREQUAL "mpi-serial") + set(CMAKE_C_COMPILER ${SCC}) + set(CMAKE_Fortran_COMPILER ${SFC}) +else() + set(CMAKE_C_COMPILER ${MPICC}) + set(CMAKE_Fortran_COMPILER ${MPIFC}) +endif() + project(cime_tests Fortran C) # We rely on pio for cmake utilities like findnetcdf.cmake, so that we don't @@ -31,7 +49,7 @@ ExternalProject_add(mct_project PREFIX ${CMAKE_CURRENT_BINARY_DIR} SOURCE_DIR ${MCT_ROOT} BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/mct - CONFIGURE_COMMAND ${MCT_ROOT}/configure ${ENABLE_MPI_SERIAL} --enable-debugging --prefix=${CMAKE_CURRENT_BINARY_DIR} CFLAGS=${CFLAGS} FCFLAGS=${FFLAGS} SRCDIR=${MCT_ROOT} DEBUG="-g" + CONFIGURE_COMMAND ${MCT_ROOT}/configure ${ENABLE_MPI_SERIAL} --enable-debugging --prefix=${CMAKE_CURRENT_BINARY_DIR} CC=${CMAKE_C_COMPILER} FC=${CMAKE_Fortran_COMPILER} CFLAGS=${CFLAGS} FCFLAGS=${FFLAGS} SRCDIR=${MCT_ROOT} DEBUG="-g" BUILD_COMMAND $(MAKE) SRCDIR=${MCT_ROOT} # Leave things in rather than "installing", because we have # no need to move things around inside of the CMake binary directory. Also, diff --git a/src/externals/CMake/CIME_utils.cmake b/src/externals/CMake/CIME_utils.cmake index aba65f877f20..6c2edd436b60 100644 --- a/src/externals/CMake/CIME_utils.cmake +++ b/src/externals/CMake/CIME_utils.cmake @@ -30,14 +30,10 @@ set(CMAKE_COLOR_MAKEFILE "${USE_COLOR}") # Compiler info #================================================= -include(${CMAKE_BINARY_DIR}/Macros.cmake RESULT_VARIABLE FOUND) list(APPEND CMAKE_MODULE_PATH "../pio2/cmake") set(CMAKE_C_FLAGS "${CPPDEFS} ${CFLAGS}") set(CMAKE_Fortran_FLAGS "${CPPDEFS} ${FFLAGS}") set (CMAKE_EXE_LINKER_FLAGS ${LDFLAGS}) -if(NOT FOUND) - message(FATAL_ERROR "You must generate a Macros.cmake file using CIME's configure") -endif() include(Compilers) diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index 91935b9a0a2f..ffe8b4b57ea9 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -243,35 +243,6 @@ def find_pfunit(compilerobj, mpilib, use_openmp): You must specify PFUNIT_PATH in config_compilers.xml, with attributes MPILIB and compile_threaded."""%(mpilib, attrs['compile_threaded'])) logger.info("Using PFUNIT_PATH: %s"%pfunit_path.text) -def get_compiler(compilerobj, compiler_type, mpilib): - """Get the specific name of the given compiler from config_compilers.xml. - - Aborts if no compiler is found. - - Args: - - compilerobj: Object of type Compilers - - compiler_type: String: MPICC, MPIFC, SCC or SFC - - mpilib: String: mpi library - 'mpi-serial', 'mpich', etc. - """ - - compiler_attrs = {'MPILIB': mpilib} - compiler = compilerobj.get_optional_compiler_node(compiler_type, compiler_attrs) - if compiler is None: - # This can happen if no MPILIB attribute is specified for this - # machine/compiler/compiler_type. So try again without this attribute. - compiler = compilerobj.get_optional_compiler_node(compiler_type) - - expect(compiler is not None, - """Could not find the specific compiler for this machine and compiler, with -compiler type %s and mpilib %s."""%(compiler_type, mpilib)) - - compiler_resolved = compilerobj.get_resolved_value(compiler.text) - if (not os.path.isabs(compiler_resolved)): - # Sometimes we end up with a compiler that is simply 'mpif90'. 
Turn it - # into a full path here. - compiler_resolved = find_executable(compiler_resolved.strip()) - return compiler_resolved - #================================================= # Iterate over input suite specs, building the tests. #================================================= @@ -358,14 +329,6 @@ def _main(): else: os.environ["compile_threaded"] = "false" - if use_mpi: - os.environ["CC"] = get_compiler(compilerobj, 'MPICC', mpilib) - os.environ["FC"] = get_compiler(compilerobj, 'MPIFC', mpilib) - else: - os.environ["CC"] = get_compiler(compilerobj, 'SCC', mpilib) - os.environ["FC"] = get_compiler(compilerobj, 'SFC', mpilib) - logger.info("CC is %s"%os.environ["CC"]) - logger.info("FC is %s"%os.environ["FC"]) os.environ["UNIT_TEST_HOST"] = socket.gethostname() if not use_mpi: From b1e7e99815abc52ffef1629815b7078eeca843fb Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 08:42:45 -0600 Subject: [PATCH 088/219] Strip whitespace from compiler variables in Macros.cmake I'm hopeful that this will solve the problems with getting the compiler variables from the Macros file on yellowstone and cheyenne. --- .../lib/CIME/BuildTools/cmakemacroswriter.py | 42 ++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py index 22ddac75eec9..0041a921b80c 100644 --- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py +++ b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py @@ -89,7 +89,8 @@ def set_variable(self, name, value): >>> s.getvalue() u'set(foo "bar")\\n' """ - self.write_line("set(" + name + ' "' + value + '")') + value_transformed = self._transform_value(name, value) + self.write_line("set(" + name + ' "' + value_transformed + '")') def start_ifeq(self, left, right): """Write out a statement to start a conditional block. @@ -117,3 +118,42 @@ def end_ifeq(self): """ self.indent_left() self.write_line("endif()") + + def _transform_value(self, name, value): + """Some elements need their values transformed in some way for CMake to handle them properly. + This method does those transformations. 
+ + Args: + - name (str): name of element + - value (str): value of element + + Returns transformed value + """ + + value_transformed = value + if self._element_needs_whitespace_removal(name): + value_transformed = value_transformed.strip() + + return value_transformed + + def _element_needs_whitespace_removal(self, name): + """Returns True if the given element needs whitespace removed + + Args: + - name (str): name of element + """ + + # These compiler variables are only handled correctly if white space is removed + vars_that_need_whitespace_removal = ( + 'MPICC', + 'MPICXX', + 'MPIFC', + 'SCC', + 'SCXX', + 'SFC' + ) + + if name in vars_that_need_whitespace_removal: + return True + else: + return False From 7c3da767456de0141469598c7935a2d6bc8dd3f0 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sun, 9 Apr 2017 19:29:35 -0600 Subject: [PATCH 089/219] added new functionality for DWAV and compsets ADLND and ADWAV to test DLND and DWAV separately --- scripts/lib/CIME/nmlgen.py | 5 +- .../datm/cime_config/config_component.xml | 2 +- .../cime_config/namelist_definition_datm.xml | 149 ++++++------------ .../cime_config/namelist_definition_dice.xml | 4 +- .../data_comps/dlnd/cime_config/buildnml | 3 +- .../dlnd/cime_config/config_component.xml | 50 +++--- .../data_comps/dwav/cime_config/buildnml | 23 +-- .../dwav/cime_config/config_component.xml | 10 +- .../cime_config/namelist_definition_dwav.xml | 47 +++--- .../data_comps/dwav/dwav_comp_mod.F90 | 97 ++++++------ .../mct/cime_config/config_compsets.xml | 10 ++ 11 files changed, 178 insertions(+), 222 deletions(-) diff --git a/scripts/lib/CIME/nmlgen.py b/scripts/lib/CIME/nmlgen.py index 06dfd81e26fa..b29d788b0bf5 100644 --- a/scripts/lib/CIME/nmlgen.py +++ b/scripts/lib/CIME/nmlgen.py @@ -326,7 +326,10 @@ def _sub_fields(self, varnames): if not line: continue if "%glc" in line: - glc_nec_indices = range(self._case.get_value('GLC_NEC')) + if self._case.get_value('GLC_NEC') == 0: + glc_nec_indices = [0] + else: + glc_nec_indices = range(self._case.get_value('GLC_NEC')) glc_nec_indices.append(glc_nec_indices[-1] + 1) glc_nec_indices.pop(0) for i in glc_nec_indices: diff --git a/src/components/data_comps/datm/cime_config/config_component.xml b/src/components/data_comps/datm/cime_config/config_component.xml index f5a32e93253f..9590ef23b2a2 100644 --- a/src/components/data_comps/datm/cime_config/config_component.xml +++ b/src/components/data_comps/datm/cime_config/config_component.xml @@ -15,7 +15,7 @@ char - CORE2_NYF,CORE2_IAF,TN460,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEP_V5,CLMGSWP3,CPLHIST3HrWx,COPYALL_NPS_v1,COPYALL_NPS_CORE2_v1,WRF,WW3,CPLHISTForcingForOcnIce + CORE2_NYF,CORE2_IAF,TN460,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEP_V5,CLMGSWP3,COPYALL_NPS_v1,COPYALL_NPS_CORE2_v1,WRF,WW3,CPLHISTForcing CORE2_NYF run_component_datm env_run.xml diff --git a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml index bd30c536205c..e3d2e3fc5ced 100644 --- a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml +++ b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml @@ -31,22 +31,21 @@ Currently the following streams are supported (term definitions precede the streams) - CLM_QIAN = Run with the CLM Qian dataset valid from 1948 to 2004 (force CLM) - CLM_QIAN_WISO = Run with the CLM Qian dataset with isotopes valid from 2000 to 2004 (force CLM) - CLMCRUNCEP = Run with the CLM CRU NCEP V4 ( default 
) forcing valid from 1900 to 2010 (force CLM) - CLMCRUNCEP_V5 = Run with the CLM CRU NCEP V5 forcing valid from 1900 to 2010 (force CLM) - CLMGSWP3 = Run with the CLM GSWP3 forcing (force CLM) - CLM1PT = Run with supplied single point data (force CLM) - CORE2_NYF = CORE2 normal year forcing (for forcing POP and CICE) - CORE2_IAF = CORE2 intra-annual year forcing (for forcing POP and CICE) - CPLHIST3HrWx = Streams for 3-hourly coupler history output (for forcing CLM) - CPLHISTForcingForOcnIce = Streams for ocn/ice forcing used for spinup (for forcing POP and CICE) - presaero = Prescribed aerosol forcing - topo = Surface topography - co2tseries = Time series of prescribed CO2 forcing - Anomaly.Forcing = Time series of correction terms - BC = Bias Correction for a stream to a given set of observations - WW3 = Wave model forcing + CLM_QIAN = Run with the CLM Qian dataset valid from 1948 to 2004 (force CLM) + CLM_QIAN_WISO = Run with the CLM Qian dataset with isotopes valid from 2000 to 2004 (force CLM) + CLMCRUNCEP = Run with the CLM CRU NCEP V4 ( default ) forcing valid from 1900 to 2010 (force CLM) + CLMCRUNCEP_V5 = Run with the CLM CRU NCEP V5 forcing valid from 1900 to 2010 (force CLM) + CLMGSWP3 = Run with the CLM GSWP3 forcing (force CLM) + CLM1PT = Run with supplied single point data (force CLM) + CORE2_NYF = CORE2 normal year forcing (for forcing POP and CICE) + CORE2_IAF = CORE2 intra-annual year forcing (for forcing POP and CICE) + CPLHISTForcing = Streams for ocn/ice forcing used for spinup (for forcing POP and CICE) + presaero = Prescribed aerosol forcing + topo = Surface topography + co2tseries = Time series of prescribed CO2 forcing + Anomaly.Forcing = Time series of correction terms + BC = Bias Correction for a stream to a given set of observations + WW3 = Wave model forcing Anomaly.Forcing.Humidity Anomaly.Forcing.Longwave @@ -73,14 +72,10 @@ CLM1PT.1x1_urbanc_alpha CLM1PT.CLM_USRDAT - CPLHIST3Hrly.Solar - CPLHIST3Hrly.Precip - CPLHIST3HrWx.nonSolarNonPrecip - - CPLHISTForcingForOcnIce.Solar - CPLHISTForcingForOcnIce.nonSolarFlux - CPLHISTForcingForOcnIce.State3hr - CPLHISTForcingForOcnIce.State1hr + CPLHISTForcing.Solar + CPLHISTForcing.nonSolarFlux + CPLHISTForcing.State3hr + CPLHISTForcing.State1hr CLM_QIAN.Solar CLM_QIAN.Precip @@ -113,9 +108,8 @@ CORE2_NYF.NCEP Note for CORE2_IAF: - The most current versions of forcing files - (those recommended for use) are duplicated - below and stored at /ccsm/ocn/iaf/): + The most current versions of forcing files(those recommended for use) are duplicated + below and stored at /ccsm/ocn/iaf/): gcgcs.prec.T62.current, giss.lwdn.T62.current, giss.swdn.T62.current, giss.swup.T62.current, ncep.dn10.T62.current, ncep.q_10.T62.current ncep.slp_.T62.current, ncep.t_10.T62.current, ncep.u_10.T62.current, ncep.v_10.T62.current @@ -182,8 +176,7 @@ CORE2_IAF.NCEP.DENS.SOFS,CORE2_IAF.NCEP.PSLV.SOFS,CORE2_IAF.PREC.SOFS.DAILY,CORE2_IAF.LWDN.SOFS.DAILY,CORE2_IAF.SWDN.SOFS.DAILY,CORE2_IAF.SWUP.SOFS.DAILY,CORE2_IAF.SHUM.SOFS.6HOUR,CORE2_IAF.TBOT.SOFS.6HOUR,CORE2_IAF.U.SOFS.6HOUR,CORE2_IAF.V.SOFS.6HOUR,CORE2_IAF.CORE2.ArcFactor WW3 NLDAS - CPLHIST3HrWx.Solar,CPLHIST3HrWx.Precip,CPLHIST3HrWx.nonSolarNonPrecip - CPLHISTForcingForOcnIce.Solar,CPLHISTForcingForOcnIce.nonSolarFlux,CPLHISTForcingForOcnIce.State3hr,CPLHISTForcingForOcnIce.State1hr + CPLHISTForcing.Solar,CPLHISTForcing.nonSolarFlux,CPLHISTForcing.State3hr,CPLHISTForcing.State1hr @@ -196,8 +189,7 @@ null $DIN_LOC_ROOT/atm/datm7/domain.clm $ATM_DOMAIN_PATH - $DIN_LOC_ROOT/atm/datm7 - 
$DATM_CPLHIST_DIR + $DATM_CPLHIST_DIR $DIN_LOC_ROOT/atm/datm7 $DIN_LOC_ROOT/share/domains/domain.clm $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.V5.c140715 @@ -251,8 +243,7 @@ domain.lnd.1x1pt-vancouverCAN_navy.090715.nc domain.lnd.1x1pt-urbanc_alpha_navy.090715.nc $ATM_DOMAIN_FILE - domain.lnd.fv0.9x1.25_gx1v6.090309.nc - null + null domain.T62.050609.nc domain.T62.050609.nc domain.cruncep.V5.c2013.0.5d.nc @@ -328,7 +319,7 @@ area area mask mask - + time time doma_lon lon doma_lat lat @@ -384,8 +375,7 @@ $DIN_LOC_ROOT/atm/datm7/CLM1PT_data/vancouverCAN.c080124 $DIN_LOC_ROOT/atm/datm7/CLM1PT_data/urbanc_alpha.c080416 $DIN_LOC_ROOT_CLMFORC/$CLM_USRDAT_NAME/CLM1PT_data - /glade/p/cesm/shared_outputdata/cases/ccsm4/$DATM_CPLHIST_CASE/cpl/hist - $DATM_CPLHIST_DIR + $DATM_CPLHIST_DIR $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.Qian.T62.c080727/Solar6Hrly $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.Qian.T62.c080727/Precip6Hrly $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.Qian.T62.c080727/TmpPrsHumWnd3Hrly @@ -445,13 +435,10 @@ clm1pt-0002-11.nc %ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x1hi.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x1h.%ym.nc + $DATM_CPLHIST_CASE.cpl.ha2x1hi.%ym.nc + $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc + $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc + $DATM_CPLHIST_CASE.cpl.ha2x1h.%ym.nc $DATM_CPLHIST_CASE.cpl.ha2x1d.%ym.nc clmforc.Qian.c2006.T62.Solr.%ym.nc clmforc.Qian.c2006.T62.Prec.%ym.nc @@ -1226,44 +1213,20 @@ PSRF pbot FLDS lwdn - - a2x3h_Faxa_swndr swndr - a2x3h_Faxa_swvdr swvdr - a2x3h_Faxa_swndf swndf - a2x3h_Faxa_swvdf swvdf - - - a2x3h_Faxa_rainc rainc - a2x3h_Faxa_rainl rainl - a2x3h_Faxa_snowc snowc - a2x3h_Faxa_snowl snowl - - - a2x3h_Sa_z z - a2x3h_Sa_u u - a2x3h_Sa_v v - a2x3h_Sa_tbot tbot - a2x3h_Sa_ptem ptem - a2x3h_Sa_shum shum - a2x3h_Sa_pbot pbot - a2x3h_Faxa_lwdn lwdn - a2x3h_Sa_dens dens - a2x3h_Sa_pslv pslv - - + a2x1hi_Faxa_swndr swndr a2x1hi_Faxa_swvdr swvdr a2x1hi_Faxa_swndf swndf a2x1hi_Faxa_swvdf swvdf - + a2x3h_Faxa_rainc rainc a2x3h_Faxa_rainl rainl a2x3h_Faxa_snowc snowc a2x3h_Faxa_snowl snowl a2x3h_Faxa_lwdn lwdn - + a2x3h_Sa_z z a2x3h_Sa_tbot tbot a2x3h_Sa_ptem ptem @@ -1274,7 +1237,7 @@ a2x3h_Sa_co2diag co2diag a2x3h_Sa_co2prog co2prog - + a2x1h_Sa_u u a2x1h_Sa_v v @@ -1526,8 +1489,7 @@ -999 $DATM_CLMNCEP_YR_ALIGN - $DATM_CPLHIST_YR_ALIGN - $DATM_CPLHIST_YR_ALIGN + $DATM_CPLHIST_YR_ALIGN $DATM_CLMNCEP_YR_ALIGN $DATM_CLMNCEP_YR_ALIGN $DATM_CLMNCEP_YR_ALIGN @@ -1561,8 +1523,7 @@ 1992 0001 $DATM_CLMNCEP_YR_START - $DATM_CPLHIST_YR_START - $DATM_CPLHIST_YR_START + $DATM_CPLHIST_YR_START 2000 $DATM_CLMNCEP_YR_START $DATM_CLMNCEP_YR_START @@ -1615,8 +1576,7 @@ 1992 0002 $DATM_CLMNCEP_YR_END - $DATM_CPLHIST_YR_END - $DATM_CPLHIST_YR_END + $DATM_CPLHIST_YR_END 2004 $DATM_CLMNCEP_YR_END $DATM_CLMNCEP_YR_END @@ -1666,13 +1626,10 @@ Stream offset. 
0 - -10800 - -5400 - -5400 - 2700 - 900 - 900 - 900 + 2700 + 900 + 900 + 900 0 @@ -1738,12 +1695,11 @@ NULL CLMNCEP - CPLHIST CORE2_NYF CORE2_IAF COPYALL CLMNCEP - CPLHIST + CPLHIST @@ -1912,12 +1868,10 @@ nearest nearest nearest - coszen - nearest - nearest - nearest - linear - linear + nearest + nearest + linear + linear nearest coszen nearest @@ -1991,10 +1945,10 @@ 1.5 1.e30 - 2.0 - 2.0 - 2.0 - 2.0 + 2.0 + 2.0 + 2.0 + 2.0 2.0 @@ -2015,7 +1969,6 @@ null null - u:v u:v u:v u:v @@ -2147,7 +2100,7 @@ 1 - -1 + -1 diff --git a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml index 57b32f6bce56..ba4793482d22 100644 --- a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml +++ b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml @@ -25,9 +25,9 @@ 'streamN.txt year_align year_first year_last ' Currently the following datamode values are supported - SSMI_IAF - SSMI NULL + COPYALL + SSTDATA **********IMPORTANT NOTE: ************* In the value matches below, regular expressions are used **** diff --git a/src/components/data_comps/dlnd/cime_config/buildnml b/src/components/data_comps/dlnd/cime_config/buildnml index 09d88806eb7e..46b0d82ad1a0 100755 --- a/src/components/data_comps/dlnd/cime_config/buildnml +++ b/src/components/data_comps/dlnd/cime_config/buildnml @@ -1,6 +1,6 @@ #!/usr/bin/env python -"""Namelist creator for CIME's data ocn model. +"""Namelist creator for CIME's data land model. """ # Typically ignore this. @@ -106,6 +106,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): nmlgen.update_shr_strdata_nml(config, stream, stream_path) else: nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) + #---------------------------------------------------- # Create `shr_strdata_nml` namelist group. 
#---------------------------------------------------- diff --git a/src/components/data_comps/dlnd/cime_config/config_component.xml b/src/components/data_comps/dlnd/cime_config/config_component.xml index f74f63c47d79..3e8d2a02d615 100644 --- a/src/components/data_comps/dlnd/cime_config/config_component.xml +++ b/src/components/data_comps/dlnd/cime_config/config_component.xml @@ -34,11 +34,11 @@ char UNSET - $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BG20TRCN.f09_g16.002_c121001 - $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BG1850CN.f09_g16.002_c121001 - $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BG20TRCN.f09_g16.002_c121001 - $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BGRCP85CN.f09_g16.002_c121001 - $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BG20TRCN.f09_g16.002_c121001 + $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BG20TRCN.f09_g16.002_c121001 + $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BG1850CN.f09_g16.002_c121001 + $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BG20TRCN.f09_g16.002_c121001 + $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BGRCP85CN.f09_g16.002_c121001 + $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/b.e10.BG20TRCN.f09_g16.002_c121001 run_component_dlnd env_run.xml @@ -49,11 +49,11 @@ char UNSET - b.e10.BG20TRCN.f09_g16.002 - b.e10.BG1850CN.f09_g16.002 - b.e10.BG20TRCN.f09_g16.002 - b.e10.BG20TRCN.f09_g16.002 - b.e10.BGRCP85CN.f09_g16.002 + b.e10.BG20TRCN.f09_g16.002 + b.e10.BG1850CN.f09_g16.002 + b.e10.BG20TRCN.f09_g16.002 + b.e10.BG20TRCN.f09_g16.002 + b.e10.BGRCP85CN.f09_g16.002 run_component_dlnd env_run.xml @@ -64,11 +64,11 @@ integer 1 - 1 - 1 - 1850 - 1850 - 2006 + 1 + 1 + 1850 + 1850 + 2006 run_component_dlnd env_run.xml @@ -79,11 +79,11 @@ integer 1 - 1976 - 26 - 1850 - 1850 - 2006 + 1976 + 26 + 1850 + 1850 + 2006 run_component_dlnd env_run.xml @@ -94,11 +94,11 @@ integer 1 - 2005 - 100 - 2005 - 2005 - 2100 + 2005 + 100 + 2005 + 2005 + 2100 run_component_dlnd env_run.xml diff --git a/src/components/data_comps/dwav/cime_config/buildnml b/src/components/data_comps/dwav/cime_config/buildnml index 7a0d67cee2c3..3aeffe5afbb3 100755 --- a/src/components/data_comps/dwav/cime_config/buildnml +++ b/src/components/data_comps/dwav/cime_config/buildnml @@ -1,7 +1,6 @@ #!/usr/bin/env python -"""Namelist creator for CIME's data ocn model. -`build_namelist` function. +"""Namelist creator for CIME's data wave model. """ # Typically ignore this. # pylint: disable=invalid-name @@ -9,10 +8,7 @@ # Disable these because this is our standard setup # pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position -import os -import shutil -import sys -import glob +import os, shutil, sys, glob _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) @@ -63,8 +59,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Clear out old data. #---------------------------------------------------- - data_list_path = os.path.join(case.get_case_root(), "Buildconf", - "dwav.input_data_list") + data_list_path = os.path.join(case.get_case_root(), "Buildconf", "dwav.input_data_list") if os.path.exists(data_list_path): os.remove(data_list_path) @@ -97,8 +92,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): inst_stream = stream + inst_string logger.debug("DWAV stream is %s", inst_stream) - stream_path = os.path.join(confdir, - "dwav.streams.txt." + inst_stream) + stream_path = os.path.join(confdir, "dwav.streams.txt." 
+ inst_stream) user_stream_path = os.path.join(case.get_case_root(), "user_dwav.streams.txt." + inst_stream) @@ -113,11 +107,10 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Create dwav_nml namelists group #---------------------------------------------------- + # set per-stream variables nmlgen.create_shr_strdata_nml() - nmlgen.add_default("decomp", "1d") - nmlgen.add_default("force_prognostic_true", value=".false.") - nmlgen.add_default("restfilm", value="undefined") - nmlgen.add_default("restfils", value="undefined") + + # set variables that are not per-stream if wav_domain_file != "UNSET": full_domain_path = os.path.join(wav_domain_path, wav_domain_file) nmlgen.add_default("domainfile", value=full_domain_path) @@ -126,7 +119,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): # Finally, write out all the namelists. #---------------------------------------------------- namelist_file = os.path.join(confdir, "dwav_in") - nmlgen.write_output_file(namelist_file, data_list_path) + nmlgen.write_output_file(namelist_file, data_list_path, groups=['dwav_nml','shr_strdata_nml']) ############################################################################### def buildnml(case, caseroot, compname): diff --git a/src/components/data_comps/dwav/cime_config/config_component.xml b/src/components/data_comps/dwav/cime_config/config_component.xml index a7fb6c1c0558..3b73827e7fdf 100644 --- a/src/components/data_comps/dwav/cime_config/config_component.xml +++ b/src/components/data_comps/dwav/cime_config/config_component.xml @@ -13,11 +13,11 @@ char - null,copyall - copyall + NULL,CLIMO + NULL - null - copyall + NULL + CLIMO run_component_dwav env_run.xml @@ -28,7 +28,7 @@ dwav null mode: - dwav copy mode: + dwav climatological mode: diff --git a/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml b/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml index c2f12ca9b4e6..81398a316f7a 100644 --- a/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml +++ b/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml @@ -47,10 +47,10 @@ char(100) streams streams_file - List of streams used for the given datm_mode. + List of streams used for the given dwav_mode. - NULL - wav.copyall + null + climo @@ -60,7 +60,7 @@ streams_file Stream domain file directory. - $DIN_LOC_ROOT/wav/dwav + $DIN_LOC_ROOT/wav/dwav @@ -70,7 +70,7 @@ streams_file Stream domain file path(s). - waveclim.mon.ww3a.150612.nc + waveclim.mon.ww3a.150612.nc @@ -80,7 +80,7 @@ streams_file Stream domain variable name(s). - + time time xc lon yc lat @@ -96,7 +96,7 @@ streams_file Stream data file directory. - $DIN_LOC_ROOT/wav/dwav + $DIN_LOC_ROOT/wav/dwav @@ -106,7 +106,7 @@ streams_file Stream data file path(s). - waveclim.mon.ww3a.150612.nc + waveclim.mon.ww3a.150612.nc @@ -116,7 +116,7 @@ streams_file Stream data variable name(s). - + lamult lamult ustokes ustokes vstokes vstokes @@ -140,7 +140,7 @@ streams_file Simulation year to align stream to. - 1 + 1 @@ -150,7 +150,7 @@ streams_file First year of stream. - 1 + 1 @@ -160,7 +160,7 @@ streams_file Last year of stream. - 1 + 1 @@ -188,15 +188,14 @@ fields not found on an input stream will be set to zero. 
Set by the following xml variables in env_run.xml DWAV_MODE specifies values for wav mode: copyall,null - default value: copyall - null - copyall + copyall + null - + char streams abs @@ -207,7 +206,7 @@ for all input data for this strdata input. - + null @@ -253,7 +252,7 @@ and fillmask are ignored. - + NOT_SET @@ -268,7 +267,7 @@ save and reuse a set of weights later. - + NOT_SET @@ -317,7 +316,7 @@ is set, mapalgo and mapmask are ignored. - + NOT_SET @@ -332,7 +331,7 @@ allows a user to save and reuse a set of weights later. - + NOT_SET @@ -401,7 +400,7 @@ - + char streams shr_strdata_nml @@ -483,8 +482,8 @@ logical - drof - drof_nml + dwav + dwav_nml If TRUE, prognostic is forced to true. .false. diff --git a/src/components/data_comps/dwav/dwav_comp_mod.F90 b/src/components/data_comps/dwav/dwav_comp_mod.F90 index 4c45d364d4a2..f33019887975 100644 --- a/src/components/data_comps/dwav/dwav_comp_mod.F90 +++ b/src/components/data_comps/dwav/dwav_comp_mod.F90 @@ -1,3 +1,6 @@ +#ifdef AIX +@PROCESS ALIAS_SIZE(805306368) +#endif module dwav_comp_mod ! !USES: @@ -51,9 +54,8 @@ module dwav_comp_mod integer(IN),parameter :: master_task=0 ! task number of master task integer(IN) :: logunit ! logging unit number integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix ! char string associated with instance - ! (ie. "_0001" or "") + character(len=16) :: inst_name ! fullname of current instance (ie. "wav_0001") + character(len=16) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") character(CL) :: wav_mode ! mode integer(IN) :: dbug = 0 ! debug level (higher is more) @@ -66,10 +68,8 @@ module dwav_comp_mod type(mct_rearr) :: rearr integer(IN),parameter :: ktrans = 3 - character(12),parameter :: avifld(1:ktrans) = & - (/"lamult ","ustokes ","vstokes "/) - character(12),parameter :: avofld(1:ktrans) = & - (/"Sw_lamult ","Sw_ustokes ","Sw_vstokes "/) + character(12),parameter :: avifld(1:ktrans) = (/"lamult ","ustokes ","vstokes "/) + character(12),parameter :: avofld(1:ktrans) = (/"Sw_lamult ","Sw_ustokes ","Sw_vstokes "/) !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONTAINS @@ -128,21 +128,25 @@ subroutine dwav_comp_init( EClock, cdata, x2w, w2x, NLFilename ) integer(IN) :: yearLast ! last year to use in data stream integer(IN) :: yearAlign ! data year that aligns with yearFirst - character(CL) :: wav_in ! dshr wav namelist - character(CL) :: decomp ! decomp strategy - character(CL) :: rest_file ! restart filename - character(CL) :: rest_file_strm ! restart filename for stream - character(CL) :: restfilm ! restart filename for namelist - character(CL) :: restfils ! restart filename for stream for namelist + character(CL) :: wav_in ! dshr wav namelist + character(CL) :: decomp ! decomp strategy + character(CL) :: rest_file ! restart filename + character(CL) :: rest_file_strm ! restart filename for stream + character(CL) :: restfilm ! restart filename for namelist + character(CL) :: restfils ! restart filename for stream for namelist + logical :: force_prognostic_true ! if true set prognostic true + logical :: exists ! file existance integer(IN) :: nu ! 
unit number !----- define namelist ----- namelist / dwav_nml / & - decomp, restfilm, restfils + decomp, restfilm, restfils, & + force_prognostic_true !--- formats --- character(*), parameter :: F00 = "('(dwav_comp_init) ',8a)" + character(*), parameter :: F0L = "('(docn_comp_init) ',a, l2)" character(*), parameter :: F01 = "('(dwav_comp_init) ',a,5i8)" character(*), parameter :: F02 = "('(dwav_comp_init) ',a,4es13.6)" character(*), parameter :: F03 = "('(dwav_comp_init) ',a,i8,a)" @@ -151,8 +155,7 @@ subroutine dwav_comp_init( EClock, cdata, x2w, w2x, NLFilename ) character(*), parameter :: F90 = "('(dwav_comp_init) ',73('='))" character(*), parameter :: F91 = "('(dwav_comp_init) ',73('-'))" character(*), parameter :: subName = "(dwav_comp_init) " - -!------------------------------------------------------------------------------- + !------------------------------------------------------------------------------- call t_startf('DWAV_INIT') @@ -202,7 +205,6 @@ subroutine dwav_comp_init( EClock, cdata, x2w, w2x, NLFilename ) !---------------------------------------------------------------------------- call t_startf('dwav_readnml') - !write(logunit,F00)' dwav_readnml...' filename = "dwav_in"//trim(inst_suffix) decomp = "1d" @@ -218,35 +220,37 @@ subroutine dwav_comp_init( EClock, cdata, x2w, w2x, NLFilename ) write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr call shr_sys_abort(subName//': namelist read error '//trim(filename)) end if - write(logunit,F00)' wav_in = ',trim(wav_in) write(logunit,F00)' decomp = ',trim(decomp) write(logunit,F00)' restfilm = ',trim(restfilm) write(logunit,F00)' restfils = ',trim(restfils) + write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true endif call shr_mpi_bcast(decomp,mpicom,'decomp') call shr_mpi_bcast(restfilm,mpicom,'restfilm') call shr_mpi_bcast(restfils,mpicom,'restfils') + call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') rest_file = trim(restfilm) rest_file_strm = trim(restfils) + if (force_prognostic_true) then + wav_present = .true. + wav_prognostic = .true. + endif !---------------------------------------------------------------------------- ! Read dshr namelist !---------------------------------------------------------------------------- - !write(logunit,F00)' read dshr nml...' call shr_strdata_readnml(SDWAV,trim(filename),mpicom=mpicom) !---------------------------------------------------------------------------- ! Validate mode !---------------------------------------------------------------------------- - !write(logunit,F00)' validate mode...' - wav_mode = trim(SDWAV%dataMode) ! check that we know how to handle the mode - if (trim(wav_mode) == 'null' .or. & + if (trim(wav_mode) == 'null' .or. & trim(wav_mode) == 'copyall') then if (my_task == master_task) & write(logunit,F00) ' wav mode = ',trim(wav_mode) @@ -262,7 +266,6 @@ subroutine dwav_comp_init( EClock, cdata, x2w, w2x, NLFilename ) !---------------------------------------------------------------------------- call t_startf('dwav_strdata_init') - !write(logunit,F00)' dwav_strdata_init...' if (trim(wav_mode) /= 'null') then wav_present = .true. 
@@ -271,8 +274,7 @@ subroutine dwav_comp_init( EClock, cdata, x2w, w2x, NLFilename ) call shr_strdata_pioinit(SDWAV, iosystem, shr_pio_getiotype(trim(inst_name))) - call shr_strdata_init(SDWAV,mpicom,compid,name='wav', & - calendar=calendar) + call shr_strdata_init(SDWAV,mpicom,compid,name='wav', calendar=calendar) endif if (my_task == master_task) then @@ -440,25 +442,20 @@ subroutine dwav_comp_run( EClock, cdata, x2w, w2x) type(mct_gsMap) , pointer :: gsmap type(mct_gGrid) , pointer :: ggrid - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: yy,mm,dd ! year month day - integer(IN) :: n ! indices - integer(IN) :: nf ! fields loop index - integer(IN) :: nl ! ocn frac index - integer(IN) :: lsize ! size of attr vect + integer(IN) :: CurrentYMD ! model date + integer(IN) :: CurrentTOD ! model sec into model date + integer(IN) :: yy,mm,dd ! year month day + integer(IN) :: n ! indices + integer(IN) :: nf ! fields loop index + integer(IN) :: lsize ! size of attr vect integer(IN) :: shrlogunit, shrloglev ! original log unit and level -! logical :: glcrun_alarm ! is glc going to run now - logical :: newdata ! has newdata been read - logical :: mssrmlf ! remove old data - integer(IN) :: idt ! integer timestep - real(R8) :: dt ! timestep -! real(R8) :: hn ! h field - logical :: write_restart ! restart now - character(CL) :: case_name ! case name - character(CL) :: rest_file ! restart_file - character(CL) :: rest_file_strm ! restart_file for stream - integer(IN) :: nu ! unit number + integer(IN) :: idt ! integer timestep + real(R8) :: dt ! timestep + logical :: write_restart ! restart now + character(CL) :: case_name ! case name + character(CL) :: rest_file ! restart_file + character(CL) :: rest_file_strm ! restart_file for stream + integer(IN) :: nu ! unit number integer(IN) :: nflds_x2w type(seq_infodata_type), pointer :: infodata @@ -494,14 +491,14 @@ subroutine dwav_comp_run( EClock, cdata, x2w, w2x) call t_startf('dwav_unpack') -! lsize = mct_avect_lsize(x2o) -! nflds_x2o = mct_avect_nRattr(x2o) + ! lsize = mct_avect_lsize(x2o) + ! nflds_x2o = mct_avect_nRattr(x2o) -! do nf=1,nflds_x2o -! do n=1,lsize -! ?? = x2o%rAttr(nf,n) -! enddo -! enddo + ! do nf=1,nflds_x2o + ! do n=1,lsize + ! ?? = x2o%rAttr(nf,n) + ! enddo + ! 
enddo call t_stopf('dwav_unpack') diff --git a/src/drivers/mct/cime_config/config_compsets.xml b/src/drivers/mct/cime_config/config_compsets.xml index c11354e14503..55c509630d26 100644 --- a/src/drivers/mct/cime_config/config_compsets.xml +++ b/src/drivers/mct/cime_config/config_compsets.xml @@ -42,6 +42,16 @@ 2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV + + ADLND + 2000_SATM_DLND%SCPL_SICE_SOCN_SROF_SGLC_SWAV + + + + ADWAV + 2000_SATM_SLND_SICE_SOCN_SROF_SGLC_DWAV%CLIMO + + ADESP 2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_DESP From 5e1c49af3ed095bf45c73acb635466d3064f6ce2 Mon Sep 17 00:00:00 2001 From: mvertens Date: Tue, 11 Apr 2017 16:39:15 -0600 Subject: [PATCH 090/219] updates to datm to remove NLDAS, WRF and NPSxxx --- .../datm/cime_config/config_component.xml | 29 +++++++-------- .../cime_config/namelist_definition_datm.xml | 35 +------------------ .../data_comps/datm/datm_comp_mod.F90 | 26 -------------- 3 files changed, 13 insertions(+), 77 deletions(-) diff --git a/src/components/data_comps/datm/cime_config/config_component.xml b/src/components/data_comps/datm/cime_config/config_component.xml index 9590ef23b2a2..1f0d627ff646 100644 --- a/src/components/data_comps/datm/cime_config/config_component.xml +++ b/src/components/data_comps/datm/cime_config/config_component.xml @@ -15,27 +15,23 @@ char - CORE2_NYF,CORE2_IAF,TN460,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEP_V5,CLMGSWP3,COPYALL_NPS_v1,COPYALL_NPS_CORE2_v1,WRF,WW3,CPLHISTForcing + CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEP_V5,CLMGSWP3,WW3,CPLHISTForcing CORE2_NYF run_component_datm env_run.xml Mode for data atmosphere component. - The default is CORE2_NYF (CORE2 normal year forcing) is the - DATM mode used in C and G compsets. CLM_QIAN, CLMCRUNCEP, CLMGSWP3 and CLM1PT are - modes using observational data for forcing CLM in I compsets. + CORE2_NYF (CORE2 normal year forcing) is the DATM mode used in C and G compsets. + CLM_QIAN, CLMCRUNCEP, CLMGSWP3 and CLM1PT are modes using observational data for I compsets. 
- CORE2_NYF - CORE2_IAF - WW3 - COPYALL_NPS_v1 - COPYALL_NPS_CORE2_v1 + CORE2_NYF + CORE2_IAF + WW3 CLM_QIAN_WISO - CLM_QIAN - CLMCRUNCEP - CLMGSWP3 - CLM1PT - CPLHIST3HrWx - CPLHISTForcingForOcnIce + CLM_QIAN + CLMCRUNCEP + CLMGSWP3 + CLM1PT + CPLHISTForcing @@ -58,7 +54,7 @@ none none none - cplhist + cplhist run_component_datm env_run.xml @@ -280,7 +276,6 @@ COREv2 datm normal year forcing: (requires additional user-supplied data) COREv2 datm interannual year forcing: (requires additional user-supplied data) WW3 wave watch data from a short period of hi temporal frequency COREv2 data - DATM NPS forcing: diff --git a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml index e3d2e3fc5ced..9beb8ed39469 100644 --- a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml +++ b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml @@ -39,7 +39,7 @@ CLM1PT = Run with supplied single point data (force CLM) CORE2_NYF = CORE2 normal year forcing (for forcing POP and CICE) CORE2_IAF = CORE2 intra-annual year forcing (for forcing POP and CICE) - CPLHISTForcing = Streams for ocn/ice forcing used for spinup (for forcing POP and CICE) + CPLHISTForcing = Streams for lnd or ocn/ice forcing used for spinup presaero = Prescribed aerosol forcing topo = Surface topography co2tseries = Time series of prescribed CO2 forcing @@ -175,7 +175,6 @@ CORE2_IAF.GCGCS.PREC,CORE2_IAF.GISS.LWDN,CORE2_IAF.GISS.SWDN,CORE2_IAF.GISS.SWUP,CORE2_IAF.NCEP.DN10,CORE2_IAF.NCEP.Q_10,CORE2_IAF.NCEP.SLP_,CORE2_IAF.NCEP.T_10,CORE2_IAF.NCEP.U_10,CORE2_IAF.NCEP.V_10,CORE2_IAF.CORE2.ArcFactor CORE2_IAF.NCEP.DENS.SOFS,CORE2_IAF.NCEP.PSLV.SOFS,CORE2_IAF.PREC.SOFS.DAILY,CORE2_IAF.LWDN.SOFS.DAILY,CORE2_IAF.SWDN.SOFS.DAILY,CORE2_IAF.SWUP.SOFS.DAILY,CORE2_IAF.SHUM.SOFS.6HOUR,CORE2_IAF.TBOT.SOFS.6HOUR,CORE2_IAF.U.SOFS.6HOUR,CORE2_IAF.V.SOFS.6HOUR,CORE2_IAF.CORE2.ArcFactor WW3 - NLDAS CPLHISTForcing.Solar,CPLHISTForcing.nonSolarFlux,CPLHISTForcing.State3hr,CPLHISTForcing.State1hr @@ -217,7 +216,6 @@ $DIN_LOC_ROOT/atm/datm7 $DIN_LOC_ROOT/atm/datm7 $DIN_LOC_ROOT/wav/ww3 - $DIN_LOC_ROOT/share/domains/domain.clm $DIN_LOC_ROOT/atm/datm7/CO2 $DIN_LOC_ROOT/atm/datm7/bias_correction/precip/gpcp/qian $DIN_LOC_ROOT/atm/datm7/clm_output/cruncep_precip_1deg/gpcp_1deg_bias_correction @@ -274,7 +272,6 @@ domain.T62.050609.nc CORE2.t_10.ArcFactor.T62.1997-2004.nc core2_G4_wns_30min_20000601_to_05.nc - domain.lnd.nldas2_0224x0464_c110415.nc fco2_datm_1765-2007_c100614.nc fco2_datm_rcp2.6_1765-2500_c130312.nc fco2_datm_rcp4.5_1765-2500_c130312.nc @@ -395,7 +392,6 @@ $DIN_LOC_ROOT/atm/datm7/CORE2 $DIN_LOC_ROOT/ocn/iaf $DIN_LOC_ROOT/wav/ww3 - $DIN_LOC_ROOT/atm/datm7/NLDAS $DIN_LOC_ROOT/atm/datm7/CO2 $DIN_LOC_ROOT/atm/datm7/bias_correction/precip/gpcp/qian $DIN_LOC_ROOT/atm/datm7/clm_output/cruncep_precip_1deg/gpcp_1deg_bias_correction @@ -1110,20 +1106,6 @@ CORE2.t_10.ArcFactor.T62.1997-2004.nc core2_G4_wns_30min_20000601_to_05.nc - - clmforc.nldas.1980-01.nc - clmforc.nldas.1980-02.nc - clmforc.nldas.1980-03.nc - clmforc.nldas.1980-04.nc - clmforc.nldas.1980-05.nc - clmforc.nldas.1980-06.nc - clmforc.nldas.1980-07.nc - clmforc.nldas.1980-08.nc - clmforc.nldas.1980-09.nc - clmforc.nldas.1980-10.nc - clmforc.nldas.1980-11.nc - clmforc.nldas.1980-12.nc - fco2_datm_1765-2007_c100614.nc fco2_datm_rcp2.6_1765-2500_c130312.nc fco2_datm_rcp4.5_1765-2500_c130312.nc @@ -1386,15 +1368,6 @@ vWind v airSeaTempDiff tbot - - TBOT tbot - WIND wind - 
QBOT shum - PSRF pbot - FLDS lwdn - PRECTmms precn - FSDS swdn - CO2 co2diag @@ -1497,7 +1470,6 @@ 1 1 2000 - 1980 1850 2005 1979 @@ -1551,7 +1523,6 @@ 1948 1948 2000 - 1980 1850 2005 1979 @@ -1604,7 +1575,6 @@ 2009 2009 2000 - 1980 2007 2500 2004 @@ -1698,7 +1668,6 @@ CORE2_NYF CORE2_IAF COPYALL - CLMNCEP CPLHIST @@ -1968,11 +1937,9 @@ null - null u:v u:v u:v - null diff --git a/src/components/data_comps/datm/datm_comp_mod.F90 b/src/components/data_comps/datm/datm_comp_mod.F90 index 48a7d63e4171..e6099e42b63e 100644 --- a/src/components/data_comps/datm/datm_comp_mod.F90 +++ b/src/components/data_comps/datm/datm_comp_mod.F90 @@ -475,7 +475,6 @@ subroutine datm_comp_init( EClock, cdata, x2a, a2x, NLFilename ) if (trim(atm_mode) == 'NULL' .or. & trim(atm_mode) == 'CORE2_NYF' .or. & trim(atm_mode) == 'CORE2_IAF' .or. & - trim(atm_mode) == 'WRF' .or. & trim(atm_mode) == 'CLMNCEP' .or. & trim(atm_mode) == 'CPLHIST' .or. & trim(atm_mode) == 'COPYALL' ) then @@ -975,31 +974,6 @@ subroutine datm_comp_run( EClock, cdata, x2a, a2x) case('CPLHIST') ! do nothing extra - case ('WRF') - lsize = mct_avect_lsize(a2x) - do n = 1,lsize - - !--- fabricate required swdn components from total swdn --- - a2x%rAttr(kswvdr,n) = avstrm%rAttr(sswdn,n)*(0.28_R8) - a2x%rAttr(kswndr,n) = avstrm%rAttr(sswdn,n)*(0.31_R8) - a2x%rAttr(kswvdf,n) = avstrm%rAttr(sswdn,n)*(0.24_R8) - a2x%rAttr(kswndf,n) = avstrm%rAttr(sswdn,n)*(0.17_R8) - - !--- just a diagnostic, not really needed - a2x%rAttr(kswnet,n) = avstrm%rAttr(sswdn,n)-avstrm%rAttr(sswup,n) - - !--- convert from hPa - a2x%rAttr(kpslv,n) = a2x%rAttr(kpslv,n)*100._R8 - a2x%rAttr(kpbot,n) = a2x%rAttr(kpbot,n)*100._R8 - - !--- tcraig, file has terrain height on it, set to 10m - a2x%rAttr(kz,n) = 10.0_R8 - - !--- convert to degK from degC - a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + tKFrz - - enddo - case('CORE2_NYF','CORE2_IAF') if (firstcall) then if (sprec < 1 .or. sswdn < 1) then From e7f84b1a2cd69d5f8f8f77319ebf1fd17f3951d2 Mon Sep 17 00:00:00 2001 From: Michael Levy Date: Thu, 20 Apr 2017 10:32:54 -0600 Subject: [PATCH 091/219] Clean up documentation for check_map tool Mostly making sure README is still accurate, but I added a little bit of verbose output to area conservation check portion of the tool and also cleaned up the Makefile so that loading the ESMF module is not required to clean the build. There were also some files missing from .gitignore that make git status a lot easier to parse after running the tool a few times. 
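A note for readers following the build steps in the README changes below (an illustrative aside, not part of this patch): the whole flow hinges on the loaded ESMF module actually exporting ESMFMKFILE. A quick sanity check before running gmake is

    $ echo $ESMFMKFILE
    /path/to/esmf/7.0.0/lib/esmf.mk     (example output only; the real path depends on the module tree)

If the variable is empty, src/Makefile stops immediately with "Environment variable ESMFMKFILE was not set." rather than failing partway through the compile.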
--- tools/mapping/check_maps/.gitignore | 2 ++ tools/mapping/check_maps/README | 32 +++++++++---------- .../src/ESMF_RegridWeightGenCheck.F90 | 2 ++ tools/mapping/check_maps/src/Makefile | 13 ++++++-- 4 files changed, 30 insertions(+), 19 deletions(-) diff --git a/tools/mapping/check_maps/.gitignore b/tools/mapping/check_maps/.gitignore index cdfc1c16f6e4..7d65634c86f7 100644 --- a/tools/mapping/check_maps/.gitignore +++ b/tools/mapping/check_maps/.gitignore @@ -1,2 +1,4 @@ ESMF_RegridWeightGenCheck src/*.o +*.nc +*.Log diff --git a/tools/mapping/check_maps/README b/tools/mapping/check_maps/README index f1731dddc390..0970d3fc28b4 100644 --- a/tools/mapping/check_maps/README +++ b/tools/mapping/check_maps/README @@ -1,9 +1,3 @@ -========================================================================== -$Id: README 46983 2013-05-09 22:08:12Z tcraig $ -$URL: https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_141106/check_maps/README $ -========================================================================== - - =============== ABOUT THIS TOOL =============== @@ -42,23 +36,29 @@ BUILD ===== To compile this tool, you need to have the ESMFMKFILE environment variable set. -The easiest way to do that is to load the ESMF modules. To do this, you must -already have the intel module loaded. Then load the version of ESMF you want -to use (as of April 2013, the most recent version available is 6.1.1): +The easiest way to do that is to load the ESMF modules. On yellowstone and +cheyenne, you must already have the intel module loaded. Then load the version +of ESMF you want to use (as of April 2017, the most recent version available is +7.0.0): +Yellowstone: $ module load esmf -$ module load esmf-6.3.0r-ncdfio-uni-O +$ module load esmf-7.0.0-ncdfio-uni-O + +Cheyenne: +$ module load esmf_libs/7.0.0 +$ module load esmf-7.0.0-ncdfio-uni-O -This will set ESMFMKFILE. With this variable set, enter the mapping/check_maps/src/ -directory and run gmake +This will set ESMFMKFILE. With this variable set, enter the src/ directory and +run gmake -$ cd mapping/check_maps/src +$ cd src $ gmake [VERBOSE=TRUE] VERBOSE=TRUE turns on some extra diagnostic output and is optional. -Back in the mapping/check_maps/ directory, you should now have an executable -file named ESMF_RegridWeightGenCheck. +Back in this directory, you should now have an executable file named +ESMF_RegridWeightGenCheck. Note that at this time, the tool only works in serial mode (building with the mpi-enabled version of the ESMF makefile leads to a segmentation fault when @@ -78,6 +78,6 @@ listed in FILELIST --help, -h Output this usage information Notes: - 1) For use on yellowstone, geyser, or caldera only! + 1) For use on cheyenne, yellowstone, geyser, or caldera only! 
2) Need to set ESMFMKFILE (see comments in Makefile) or compilation will fail 3) If -rc option is not enabled, -v flag is ignored and verbose / concise will depend on previous compilation diff --git a/tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90 b/tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90 index 189cc70ba6fd..01c500ea1b5d 100644 --- a/tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90 +++ b/tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90 @@ -716,6 +716,8 @@ program OfflineTester failCnt = failCnt + 1 if (Verbose) then print *, "FAILED: conservation error = ", totArea + print *, " area1 = ", sum(grid1area) + print *, " area2 = ", sum(grid2area) else print *, "FAILED: conservation error = ", totArea, & " in test ", j diff --git a/tools/mapping/check_maps/src/Makefile b/tools/mapping/check_maps/src/Makefile index 873af453426b..2d1ddb3ba757 100644 --- a/tools/mapping/check_maps/src/Makefile +++ b/tools/mapping/check_maps/src/Makefile @@ -15,11 +15,18 @@ ### The executable should run on the yellowstone login node ###### ################################################################################ -ifneq ($(origin ESMFMKFILE), environment) -$(error Environment variable ESMFMKFILE was not set.) +# Don't require ESMF to be loaded to run "make clean": +USE_ESMF=TRUE +ifeq ($(MAKECMDGOALS),clean) + USE_ESMF=FALSE endif -include $(ESMFMKFILE) +ifeq ($(USE_ESMF),TRUE) + ifneq ($(origin ESMFMKFILE), environment) + $(error Environment variable ESMFMKFILE was not set.) + endif + include $(ESMFMKFILE) +endif TARGET = ESMF_RegridWeightGenCheck # To compile with verbose output, run From 531ad6d0dcc71b92be1c3148c899eed3e43b44b0 Mon Sep 17 00:00:00 2001 From: mvertens Date: Tue, 11 Apr 2017 17:43:10 -0600 Subject: [PATCH 092/219] more updates for cleaning up data models --- .../dice/cime_config/config_component.xml | 4 +-- .../cime_config/namelist_definition_dice.xml | 32 +++++++++---------- .../docn/cime_config/config_component.xml | 5 ++- .../cime_config/namelist_definition_docn.xml | 28 ++++++++-------- .../cime_config/namelist_definition_drof.xml | 17 ++-------- .../data_comps/drof/drof_comp_mod.F90 | 4 +-- 6 files changed, 38 insertions(+), 52 deletions(-) diff --git a/src/components/data_comps/dice/cime_config/config_component.xml b/src/components/data_comps/dice/cime_config/config_component.xml index 26591c92f2bb..3aab87e32dc4 100644 --- a/src/components/data_comps/dice/cime_config/config_component.xml +++ b/src/components/data_comps/dice/cime_config/config_component.xml @@ -21,7 +21,7 @@ ssmi ssmi_iaf prescribed - copyall + ww3 null run_component_dice @@ -48,7 +48,7 @@ dice mode is ssmi: dice mode is ssmi_iaf: dice mode is prescribed: - dice mode is copy: + dice mode is ww3: dice mode is null: diff --git a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml index ba4793482d22..d7d79c7fd688 100644 --- a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml +++ b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml @@ -55,11 +55,11 @@ streams_file List of streams used for the given dice_mode. 
- NULL - SSMI - SSMI_IAF + NULL + SSMI + SSMI_IAF prescribed - copyall + ww3 @@ -69,7 +69,7 @@ derived does not appear in namelist - only used to set domain and data information - $SSTICE_GRID_FILENAME + $SSTICE_GRID_FILENAME $SSTICE_GRID_FILENAME @@ -80,7 +80,7 @@ derived does not appear in namelist - only used to set domain and data information - $SSTICE_GRID_FILENAME + $SSTICE_GRID_FILENAME $SSTICE_GRID_FILENAME @@ -122,7 +122,7 @@ area area mask mask - + time time gridLon lon gridLat lat @@ -226,7 +226,7 @@ ifrac ifrac - + iceCon ifrac @@ -252,7 +252,7 @@ 1 1 $SSTICE_YEAR_ALIGN - $SSTICE_YEAR_ALIGN + $SSTICE_YEAR_ALIGN @@ -266,7 +266,7 @@ 1 1948 $SSTICE_YEAR_START - $SSTICE_YEAR_START + $SSTICE_YEAR_START @@ -280,7 +280,7 @@ 1 2009 $SSTICE_YEAR_END - $SSTICE_YEAR_END + $SSTICE_YEAR_END @@ -326,11 +326,11 @@ If DICE_MODE is set to null, datamodel will be set to NULL - NULL - SSTDATA - SSTDATA + NULL + SSTDATA + SSTDATA SSTDATA - COPYALL + COPYALL @@ -347,7 +347,7 @@ $ICE_DOMAIN_PATH/$ICE_DOMAIN_FILE $SSTICE_GRID_FILENAME - $SSTICE_GRID_FILENAME + $SSTICE_GRID_FILENAME diff --git a/src/components/data_comps/docn/cime_config/config_component.xml b/src/components/data_comps/docn/cime_config/config_component.xml index 686ab1bea336..fcf76d7bc96f 100644 --- a/src/components/data_comps/docn/cime_config/config_component.xml +++ b/src/components/data_comps/docn/cime_config/config_component.xml @@ -26,6 +26,7 @@ copyall pres_aquap som_aquap + ww3 run_component_docn env_run.xml @@ -219,9 +220,8 @@ docn null mode: docn slab ocean mode: docn data mode: - docn us20 mode: - docn copy mode: docn interannual mode: + docn ww3 mode: @@ -231,4 +231,3 @@ - diff --git a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml index 5d249cdbd805..494271d89a05 100644 --- a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +++ b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml @@ -33,7 +33,7 @@ prescribed SSTDATA (Run with prescribed SST, ICE_COV) som SOM (Slab ocean model) null NULL (NULL mode) - copyall COPYALL (Copy fields only) + ww3 COPYALL (Copy fields only) --> @@ -51,7 +51,7 @@ som som interannual - copyall + ww3 @@ -61,7 +61,7 @@ derived does not appear in namelist - only used to set domain and data information - $SSTICE_GRID_FILENAME + $SSTICE_GRID_FILENAME $SSTICE_GRID_FILENAME @@ -72,7 +72,7 @@ derived does not appear in namelist - only used to set domain and data information - $SSTICE_DATA_FILENAME + $SSTICE_DATA_FILENAME $SSTICE_DATA_FILENAME @@ -114,7 +114,7 @@ area area mask mask - + time time gridLon lon gridLat lat @@ -174,7 +174,7 @@ sst t - + iceCon s @@ -214,7 +214,7 @@ -999 $SSTICE_YEAR_ALIGN - $SSTICE_YEAR_ALIGN + $SSTICE_YEAR_ALIGN 1 1 @@ -228,7 +228,7 @@ -999 $SSTICE_YEAR_START - $SSTICE_YEAR_START + $SSTICE_YEAR_START 1 1850 @@ -241,10 +241,10 @@ Last year of stream. 
-999 - $SSTICE_YEAR_END - $SSTICE_YEAR_END - 1 - 2014 + $SSTICE_YEAR_END + $SSTICE_YEAR_END + 1 + 2014 @@ -312,13 +312,13 @@ default: SSTDATA (prescribed setting for DOCN_MODE)' - NULL + NULL SSTDATA SSTDATA SOM SOM IAF - COPYALL + COPYALL diff --git a/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml b/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml index a5a953ba8ed6..03a819b67f16 100644 --- a/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml +++ b/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml @@ -212,7 +212,7 @@ char streams shr_strdata_nml - NULL,CPLHIST,DIATREN_ANN_RX1,DIATREN_IAF_RX1 + NULL,COPYALL The runoff data is associated with the river model. datamode = "NULL" @@ -223,21 +223,10 @@ dataMode = "COPYALL" Copies all fields directly from the input data streams Any required fields not found on an input stream will be set to zero. - datamode = "CPLHIST" - Reads in data from coupler history files generated by a previous run - datamode = DIATREN_ANN_RX1 - Reads in annual forcing river data used for CORE2 forcing runs - datamode = DIATREN_ANN_RX1,DIATREN_IAF_RX1 - Reads in intra-annual forcing river data used for CORE2 forcing runs - Set by the the DROF_MODE xml variables in env_run.xml - specifies values for rof mode: CPLHIST,DIATREN_ANN_RX1,DIATREN_IAF_RX1,NULL - default value: DIATREN_ANN_RX1 - NULL - CPLHIST - DIATREN_ANN_RX1 - DIATREN_IAF_RX1 + COPYALL + NULL diff --git a/src/components/data_comps/drof/drof_comp_mod.F90 b/src/components/data_comps/drof/drof_comp_mod.F90 index 060b5fad678c..4908fba2d652 100644 --- a/src/components/data_comps/drof/drof_comp_mod.F90 +++ b/src/components/data_comps/drof/drof_comp_mod.F90 @@ -256,9 +256,7 @@ subroutine drof_comp_init( EClock, cdata, x2r, r2x, NLFilename ) ! check that we know how to handle the mode if (trim(rof_mode) == 'NULL' .or. & - trim(rof_mode) == 'CPLHIST' .or. & - trim(rof_mode) == 'DIATREN_ANN_RX1' .or. 
& - trim(rof_mode) == 'DIATREN_IAF_RX1') then + trim(rof_mode) == 'COPYALL') if (my_task == master_task) write(logunit,F00) 'rof mode = ',trim(rof_mode) else write(logunit,F00) ' ERROR illegal rof mode = ',trim(rof_mode) From 80873741b397cd9b947dd71f5e01dff45200039f Mon Sep 17 00:00:00 2001 From: mvertens Date: Wed, 12 Apr 2017 14:07:52 -0600 Subject: [PATCH 093/219] removed AWAV compset and support for DATM%WW3, DOCN%WW3 and DICE%WW3 --- .../datm/cime_config/config_component.xml | 4 +-- .../cime_config/namelist_definition_datm.xml | 25 ---------------- .../dice/cime_config/config_component.xml | 2 -- .../cime_config/namelist_definition_dice.xml | 11 ------- .../docn/cime_config/config_component.xml | 13 ++------ .../cime_config/namelist_definition_docn.xml | 16 ---------- .../mct/cime_config/config_compsets.xml | 30 +++++++++---------- 7 files changed, 18 insertions(+), 83 deletions(-) diff --git a/src/components/data_comps/datm/cime_config/config_component.xml b/src/components/data_comps/datm/cime_config/config_component.xml index 1f0d627ff646..888066ca2bda 100644 --- a/src/components/data_comps/datm/cime_config/config_component.xml +++ b/src/components/data_comps/datm/cime_config/config_component.xml @@ -15,7 +15,7 @@ char - CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEP_V5,CLMGSWP3,WW3,CPLHISTForcing + CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEP_V5,CLMGSWP3,CPLHISTForcing CORE2_NYF run_component_datm env_run.xml @@ -25,7 +25,6 @@ CORE2_NYF CORE2_IAF - WW3 CLM_QIAN_WISO CLM_QIAN CLMCRUNCEP @@ -275,7 +274,6 @@ single point tower site atm input data: COREv2 datm normal year forcing: (requires additional user-supplied data) COREv2 datm interannual year forcing: (requires additional user-supplied data) - WW3 wave watch data from a short period of hi temporal frequency COREv2 data diff --git a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml index 9beb8ed39469..230e42d60f57 100644 --- a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml +++ b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml @@ -45,7 +45,6 @@ co2tseries = Time series of prescribed CO2 forcing Anomaly.Forcing = Time series of correction terms BC = Bias Correction for a stream to a given set of observations - WW3 = Wave model forcing Anomaly.Forcing.Humidity Anomaly.Forcing.Longwave @@ -137,8 +136,6 @@ topo.observed - WW3 - **********IMPORTANT NOTE: ************* In the value matches below, regular expressions are used **** If two matches are equivalent, the FIRST one will be used, so need to make sure @@ -174,7 +171,6 @@ CORE2_NYF.GISS,CORE2_NYF.GXGXS,CORE2_NYF.NCEP CORE2_IAF.GCGCS.PREC,CORE2_IAF.GISS.LWDN,CORE2_IAF.GISS.SWDN,CORE2_IAF.GISS.SWUP,CORE2_IAF.NCEP.DN10,CORE2_IAF.NCEP.Q_10,CORE2_IAF.NCEP.SLP_,CORE2_IAF.NCEP.T_10,CORE2_IAF.NCEP.U_10,CORE2_IAF.NCEP.V_10,CORE2_IAF.CORE2.ArcFactor CORE2_IAF.NCEP.DENS.SOFS,CORE2_IAF.NCEP.PSLV.SOFS,CORE2_IAF.PREC.SOFS.DAILY,CORE2_IAF.LWDN.SOFS.DAILY,CORE2_IAF.SWDN.SOFS.DAILY,CORE2_IAF.SWUP.SOFS.DAILY,CORE2_IAF.SHUM.SOFS.6HOUR,CORE2_IAF.TBOT.SOFS.6HOUR,CORE2_IAF.U.SOFS.6HOUR,CORE2_IAF.V.SOFS.6HOUR,CORE2_IAF.CORE2.ArcFactor - WW3 CPLHISTForcing.Solar,CPLHISTForcing.nonSolarFlux,CPLHISTForcing.State3hr,CPLHISTForcing.State1hr @@ -215,7 +211,6 @@ $DIN_LOC_ROOT/atm/datm7 $DIN_LOC_ROOT/atm/datm7 $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/wav/ww3 $DIN_LOC_ROOT/atm/datm7/CO2 
$DIN_LOC_ROOT/atm/datm7/bias_correction/precip/gpcp/qian $DIN_LOC_ROOT/atm/datm7/clm_output/cruncep_precip_1deg/gpcp_1deg_bias_correction @@ -271,7 +266,6 @@ domain.T62.050609.nc domain.T62.050609.nc CORE2.t_10.ArcFactor.T62.1997-2004.nc - core2_G4_wns_30min_20000601_to_05.nc fco2_datm_1765-2007_c100614.nc fco2_datm_rcp2.6_1765-2500_c130312.nc fco2_datm_rcp4.5_1765-2500_c130312.nc @@ -337,13 +331,6 @@ area area mask mask - - time time - gridLon lon - gridLat lat - area area - mask mask - time time LONGXY lon @@ -391,7 +378,6 @@ $DIN_LOC_ROOT/atm/datm7/NYF $DIN_LOC_ROOT/atm/datm7/CORE2 $DIN_LOC_ROOT/ocn/iaf - $DIN_LOC_ROOT/wav/ww3 $DIN_LOC_ROOT/atm/datm7/CO2 $DIN_LOC_ROOT/atm/datm7/bias_correction/precip/gpcp/qian $DIN_LOC_ROOT/atm/datm7/clm_output/cruncep_precip_1deg/gpcp_1deg_bias_correction @@ -1105,7 +1091,6 @@ ncep.v_10.T62.2009.20120412.nc CORE2.t_10.ArcFactor.T62.1997-2004.nc - core2_G4_wns_30min_20000601_to_05.nc fco2_datm_1765-2007_c100614.nc fco2_datm_rcp2.6_1765-2500_c130312.nc fco2_datm_rcp4.5_1765-2500_c130312.nc @@ -1363,11 +1348,6 @@ TarcFactor tarcf - - uWind u - vWind v - airSeaTempDiff tbot - CO2 co2diag @@ -1469,7 +1449,6 @@ 1 1 1 - 2000 1850 2005 1979 @@ -1522,7 +1501,6 @@ 1948 1948 1948 - 2000 1850 2005 1979 @@ -1574,7 +1552,6 @@ 2009 2009 2009 - 2000 2007 2500 2004 @@ -1667,7 +1644,6 @@ CLMNCEP CORE2_NYF CORE2_IAF - COPYALL CPLHIST @@ -1939,7 +1915,6 @@ null u:v u:v - u:v diff --git a/src/components/data_comps/dice/cime_config/config_component.xml b/src/components/data_comps/dice/cime_config/config_component.xml index 3aab87e32dc4..92bc2a30261c 100644 --- a/src/components/data_comps/dice/cime_config/config_component.xml +++ b/src/components/data_comps/dice/cime_config/config_component.xml @@ -21,7 +21,6 @@ ssmi ssmi_iaf prescribed - ww3 null run_component_dice @@ -48,7 +47,6 @@ dice mode is ssmi: dice mode is ssmi_iaf: dice mode is prescribed: - dice mode is ww3: dice mode is null: diff --git a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml index d7d79c7fd688..dfb5e4b1733a 100644 --- a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml +++ b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml @@ -59,7 +59,6 @@ SSMI SSMI_IAF prescribed - ww3 @@ -69,7 +68,6 @@ derived does not appear in namelist - only used to set domain and data information - $SSTICE_GRID_FILENAME $SSTICE_GRID_FILENAME @@ -80,7 +78,6 @@ derived does not appear in namelist - only used to set domain and data information - $SSTICE_GRID_FILENAME $SSTICE_GRID_FILENAME @@ -122,7 +119,6 @@ area area mask mask - time time gridLon lon gridLat lat @@ -226,9 +222,6 @@ ifrac ifrac - - iceCon ifrac - @@ -252,7 +245,6 @@ 1 1 $SSTICE_YEAR_ALIGN - $SSTICE_YEAR_ALIGN @@ -266,7 +258,6 @@ 1 1948 $SSTICE_YEAR_START - $SSTICE_YEAR_START @@ -280,7 +271,6 @@ 1 2009 $SSTICE_YEAR_END - $SSTICE_YEAR_END @@ -330,7 +320,6 @@ SSTDATA SSTDATA SSTDATA - COPYALL diff --git a/src/components/data_comps/docn/cime_config/config_component.xml b/src/components/data_comps/docn/cime_config/config_component.xml index fcf76d7bc96f..fda915c9241d 100644 --- a/src/components/data_comps/docn/cime_config/config_component.xml +++ b/src/components/data_comps/docn/cime_config/config_component.xml @@ -84,9 +84,6 @@ char CAMDATA - - WW3 - run_component_docn env_run.xml Prescribed SST and ice coverage stream name. 
@@ -104,9 +101,9 @@ $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.47x0.63_clim_c061106.nc $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.23x0.31_clim_c110526.nc $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1x1_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_48x96_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1.9x2.5_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.9x1.25_1850_2012_c130411.nc + $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_48x96_1850_2012_c130411.nc + $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1.9x2.5_1850_2012_c130411.nc + $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.9x1.25_1850_2012_c130411.nc $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.47x0.63_1850_2012_c130411.nc $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.23x0.31_1850_2012_c130411.nc $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1x1_clim_pi_c101029.nc @@ -119,7 +116,6 @@ $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.9x1.25_clim_c040926.nc $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_4x5_clim_c110526.nc $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_4x5_clim_c110526.nc - $DIN_LOC_ROOT/wav/ww3/core2_G4_ice_30min_20000601_to_05.nc run_component_docn env_run.xml @@ -156,7 +152,6 @@ $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.9x1.25_gx1v6_090403.nc $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.4x5_gx3v7_100120.nc $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.4x5_gx3v7_100120.nc - $DIN_LOC_ROOT/wav/ww3/core2_G4_ice_30min_20000601_to_05.nc run_component_cam_sstice env_run.xml @@ -206,7 +201,6 @@ 0 2012 - 2000 run_component_cam_sstice env_run.xml @@ -221,7 +215,6 @@ docn slab ocean mode: docn data mode: docn interannual mode: - docn ww3 mode: diff --git a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml index 494271d89a05..26ac6b980401 100644 --- a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +++ b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml @@ -33,7 +33,6 @@ prescribed SSTDATA (Run with prescribed SST, ICE_COV) som SOM (Slab ocean model) null NULL (NULL mode) - ww3 COPYALL (Copy fields only) --> @@ -51,7 +50,6 @@ som som interannual - ww3 @@ -61,7 +59,6 @@ derived does not appear in namelist - only used to set domain and data information - $SSTICE_GRID_FILENAME $SSTICE_GRID_FILENAME @@ -72,7 +69,6 @@ derived does not appear in namelist - only used to set domain and data information - $SSTICE_DATA_FILENAME $SSTICE_DATA_FILENAME @@ -114,11 +110,6 @@ area area mask mask - - time time - gridLon lon - gridLat lat - time time lon lon @@ -174,9 +165,6 @@ sst t - - iceCon s - T t S s @@ -214,7 +202,6 @@ -999 $SSTICE_YEAR_ALIGN - $SSTICE_YEAR_ALIGN 1 1 @@ -228,7 +215,6 @@ -999 $SSTICE_YEAR_START - $SSTICE_YEAR_START 1 1850 @@ -242,7 +228,6 @@ -999 $SSTICE_YEAR_END - $SSTICE_YEAR_END 1 2014 @@ -318,7 +303,6 @@ SOM SOM IAF - COPYALL diff --git a/src/drivers/mct/cime_config/config_compsets.xml b/src/drivers/mct/cime_config/config_compsets.xml index 55c509630d26..fb129089b514 100644 --- a/src/drivers/mct/cime_config/config_compsets.xml +++ b/src/drivers/mct/cime_config/config_compsets.xml @@ -9,19 +9,22 @@ The compset longname below has the specified order atm, lnd, ice, ocn, river, glc wave esp cesm-options - The notation for the compset longname is + The notation for the compset longname below is TIME_ATM[%phys]_LND[%phys]_ICE[%phys]_OCN[%phys]_ROF[%phys]_GLC[%phys]_WAV[%phys][_ESP][_BGC%phys] - Where for the CAM specific compsets below the following is supported + + The 
following compsets are those that can be tested in CIME stand-alone configurations + without any prognostic components. + For the compsets below the following are the only allowable values of the components. + TIME = Time period (e.g. 2000, HIST, RCP8...) - ATM = [CAM4, CAM5] - LND = [CLM40, CLM45, CLM50, SLND] - ICE = [CICE, DICE, SICE] - OCN = [DOCN, ,AQUAP, SOCN] - ROF = [RTM, SROF] - GLC = [CISM1, CISM2, SGLC] - WAV = [SWAV WW3] - ESP = [SESP DESP] - BGC = optional BGC scenario + ATM = [DATM, SATM, XATM] + LND = [DLND, SLND, XLND] + ICE = [DICE, SICE, XICE] + OCN = [DOCN, SOCN, XOCN] + ROF = [DROF, SROF, XROF] + GLC = [ SGLC ] + WAV = [DWAV, SWAV ] + ESP = [DESP, SESP ] The OPTIONAL %phys attributes specify submodes of the given system For example DOCN%DOM is the data ocean model for DOCN @@ -67,11 +70,6 @@ 2000_DATM%IAF_SLND_DICE%IAF_DOCN%IAF_DROF%IAF_SGLC_SWAV - - AWAV - 2000_DATM%WW3_SLND_DICE%COPY_DOCN%COPY_SROF_SGLC_WW3 - - S 2000_SATM_SLND_SICE_SOCN_SROF_SGLC_SWAV_SESP From 1c4563a864b2de3c1c1b1f281f9be8f928f398f1 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Wed, 12 Apr 2017 23:12:20 -0600 Subject: [PATCH 094/219] fixed bugs --- .../data_comps/dice/cime_config/namelist_definition_dice.xml | 4 ---- src/components/data_comps/drof/drof_comp_mod.F90 | 3 +-- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml index dfb5e4b1733a..e5b2cdb7db04 100644 --- a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml +++ b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml @@ -119,10 +119,6 @@ area area mask mask - time time - gridLon lon - gridLat lat - diff --git a/src/components/data_comps/drof/drof_comp_mod.F90 b/src/components/data_comps/drof/drof_comp_mod.F90 index 4908fba2d652..a2c9974bd3ac 100644 --- a/src/components/data_comps/drof/drof_comp_mod.F90 +++ b/src/components/data_comps/drof/drof_comp_mod.F90 @@ -255,8 +255,7 @@ subroutine drof_comp_init( EClock, cdata, x2r, r2x, NLFilename ) ! check that we know how to handle the mode - if (trim(rof_mode) == 'NULL' .or. & - trim(rof_mode) == 'COPYALL') + if (trim(rof_mode) == 'NULL' .or. 
trim(rof_mode) == 'COPYALL') then if (my_task == master_task) write(logunit,F00) 'rof mode = ',trim(rof_mode) else write(logunit,F00) ' ERROR illegal rof mode = ',trim(rof_mode) From 1f2a389c44d09ca4a9ac67ffe6446bbfafd77700 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sat, 15 Apr 2017 14:40:07 -0600 Subject: [PATCH 095/219] added new grid for ADWAV testing --- config/cesm/config_grids.xml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml index 8fab255610d3..a709e2d5338f 100644 --- a/config/cesm/config_grids.xml +++ b/config/cesm/config_grids.xml @@ -862,10 +862,9 @@ gx1v7 - - ww3a - ww3a - ww3a + + + ww3a From a4bd90ac63d9d27155ef0b70b735f011219c181f Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 10:41:02 -0600 Subject: [PATCH 096/219] Move initial compiler setup into a file that can be included --- CMakeLists.txt | 20 ++------------------ src/externals/CMake/CIME_initial_setup.cmake | 16 ++++++++++++++++ src/externals/CMake/CIME_utils.cmake | 4 ++++ src/externals/CMake/README.md | 8 +++++++- 4 files changed, 29 insertions(+), 19 deletions(-) create mode 100644 src/externals/CMake/CIME_initial_setup.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 3a46a266917b..6329c5295768 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,23 +2,8 @@ cmake_minimum_required(VERSION 2.8) include(ExternalProject) set(CIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}") -# This block needs to happen before the 'project' line. -# -# FIXME(wjs, 2017-04-20) If we can get this working, we should move it into a -# new file that can be included, like CIME_initial_setup. All top-level -# CMakeLists.txt files (i.e., CLM and CAM right now) will need to include this, -# too. -include(${CMAKE_BINARY_DIR}/Macros.cmake RESULT_VARIABLE FOUND) -if(NOT FOUND) - message(FATAL_ERROR "You must generate a Macros.cmake file using CIME's configure") -endif() -if("$ENV{MPILIB}" STREQUAL "mpi-serial") - set(CMAKE_C_COMPILER ${SCC}) - set(CMAKE_Fortran_COMPILER ${SFC}) -else() - set(CMAKE_C_COMPILER ${MPICC}) - set(CMAKE_Fortran_COMPILER ${MPIFC}) -endif() +list(APPEND CMAKE_MODULE_PATH ${CIME_CMAKE_MODULE_DIRECTORY}) +include(CIME_initial_setup) project(cime_tests Fortran C) @@ -26,7 +11,6 @@ project(cime_tests Fortran C) # need to duplicate this cmake code list(APPEND CMAKE_MODULE_PATH "${CIME_ROOT}/src/externals/pio2/cmake") -list(APPEND CMAKE_MODULE_PATH ${CIME_CMAKE_MODULE_DIRECTORY}) include(CIME_utils) find_package(NetCDF COMPONENTS C Fortran) include_directories(${NetCDF_C_INCLUDE_DIRS} ${NetCDF_Fortran_INCLUDE_DIRS}) diff --git a/src/externals/CMake/CIME_initial_setup.cmake b/src/externals/CMake/CIME_initial_setup.cmake new file mode 100644 index 000000000000..c3e833175749 --- /dev/null +++ b/src/externals/CMake/CIME_initial_setup.cmake @@ -0,0 +1,16 @@ +# Module used for CIME testing. +# +# This module does some initial setup that must be done BEFORE the 'project' +# line in the main CMakeLists.txt file. 
+ +include(${CMAKE_BINARY_DIR}/Macros.cmake RESULT_VARIABLE FOUND) +if(NOT FOUND) + message(FATAL_ERROR "You must generate a Macros.cmake file using CIME's configure") +endif() +if("$ENV{MPILIB}" STREQUAL "mpi-serial") + set(CMAKE_C_COMPILER ${SCC}) + set(CMAKE_Fortran_COMPILER ${SFC}) +else() + set(CMAKE_C_COMPILER ${MPICC}) + set(CMAKE_Fortran_COMPILER ${MPIFC}) +endif() diff --git a/src/externals/CMake/CIME_utils.cmake b/src/externals/CMake/CIME_utils.cmake index 6c2edd436b60..88377ad2920f 100644 --- a/src/externals/CMake/CIME_utils.cmake +++ b/src/externals/CMake/CIME_utils.cmake @@ -3,6 +3,10 @@ # This module contains statements that would otherwise be boilerplate in # most CIME tests. It enables CTest testing, handles the USE_COLOR and # ENABLE_GENF90 arguments, and includes several other modules. +# +# Some of the things done here must be done AFTER the 'project' line in the main +# CMakeLists.txt file. This assumes that CIME_initial_setup has already been +# included. #========================================================================== # Copyright (c) 2013-2014, University Corporation for Atmospheric Research diff --git a/src/externals/CMake/README.md b/src/externals/CMake/README.md index 21d36fb3be65..ae583206b0e9 100644 --- a/src/externals/CMake/README.md +++ b/src/externals/CMake/README.md @@ -28,6 +28,12 @@ Sourcelist_utils - Use source file lists defined over multiple directories. Modules that are CESM-specific and/or incomplete: -CIME_utils - Handles a few options, and includes several other modules. +CIME\_initial\_setup - Handles setup that must be done before the 'project' +line. This must be included before the 'project' line in the main CMakeLists.txt +file. + +CIME_utils - Handles a few options, and includes several other modules. This +must be included after the 'project' line in the main CMakeLists.txt file, and +after the inclusion of CIME\_initial\_setup. Compilers - Specify compiler-specific behavior, add build types for CESM. From a1fa5c2d993a951255985704d2322d3fef9d9b61 Mon Sep 17 00:00:00 2001 From: Michael Levy Date: Thu, 20 Apr 2017 10:55:59 -0600 Subject: [PATCH 097/219] Update install instructions for gen_domain With updates to configure script, INSTALL was out of date. Also changed the "veryclean" target to "distclean" for consistency with other tools and made sure we don't check for the Macros.make file unless we are actually building the executable. --- tools/mapping/gen_domain_files/INSTALL | 21 ++++++++++----------- tools/mapping/gen_domain_files/src/Makefile | 18 +++++++++++++++--- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/tools/mapping/gen_domain_files/INSTALL b/tools/mapping/gen_domain_files/INSTALL index 3b253d264d7b..a244892d2969 100644 --- a/tools/mapping/gen_domain_files/INSTALL +++ b/tools/mapping/gen_domain_files/INSTALL @@ -2,17 +2,16 @@ HOW TO BUILD ============ -Prior to building, you must make sure environment variables CIMEROOT and CIME_MODEL are set. - (1) $ cd src -(2) $ $CIMEROOT/tools/configure --machine [machine name] --macros-format Makefile -(3) Bash users: - $ . ./.env_mach_specific.sh - csh users: - $ source ./.env_mach_specific.csh -(4) $ gmake +(2) $ ../../../configure --macros-format Makefile --mpilib mpi-serial +Bash users: +(3) $ (. ./.env_mach_specific.sh ; gmake) +csh users: +(3) $ (source ./.env_mach_specific.csh ; gmake) -Note: in the second step, replace [machine name] with the machine you are -building on. 
Also, some machines have dedicated build nodes, so you might need -to SSH to another node before the 'gmake' step. +Note: in the second step, you may need to include "--machine [machine name]", +where [machine name] is the name of the machine you are building on. In most +cases configure can figure that out on its own, but if you get an error that is +the first fix to try. Also, some machines have dedicated build nodes, so you +might need to SSH to another node before the 'gmake' step. diff --git a/tools/mapping/gen_domain_files/src/Makefile b/tools/mapping/gen_domain_files/src/Makefile index 0a10703350c9..82c749eb39a4 100644 --- a/tools/mapping/gen_domain_files/src/Makefile +++ b/tools/mapping/gen_domain_files/src/Makefile @@ -36,7 +36,19 @@ default: $(EXENAME) $(MACROS): @echo "use the configure script located in the Machines directory to create the Makefile $(MACROS) file" --include $(MACROS) +# Do not include Macros for clean or distclean targets +USE_MACROS=TRUE +ifeq ($(MAKECMDGOALS),clean) + USE_MACROS=FALSE +endif +ifeq ($(MAKECMDGOALS),distcleanclean) + USE_MACROS=FALSE +endif + +ifeq ($(USE_MACROS),TRUE) + -include $(MACROS) +endif + # Check for the netcdf library and include directories ifdef NETCDF_PATH LIB_NETCDF:=$(NETCDF_PATH)/lib @@ -133,7 +145,7 @@ $(EXENAME): $(OBJS) clean: $(RM) -f $(OBJS) $(EXENAME) *.mod -veryclean: - $(RM) -f $(OBJS) $(EXENAME) *.mod $(MACROS) env_mach* Depends* .env_mach* +distclean: clean + ../../../configure --clean gen_domain.o : gen_domain.F90 From 90317879f547dd5e4563bdeef4919ae5853d45d8 Mon Sep 17 00:00:00 2001 From: Michael Levy Date: Thu, 20 Apr 2017 11:08:40 -0600 Subject: [PATCH 098/219] Add cheyenne support to ESMF wrappers create_ESMF_map.sh can now auto-detect running on cheyenne and load the appropriate ESMF modules. Both yellowstone and cheyenne use ESMF 7.0.0 (yellowstone was previously using 6.3.0rp1), and the README files allude to running on cheyenne instead of just yellowstone / caldera / geyser. Also adding a .gitignore file for gen_mapping_files/. 
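One usage note tied to the README wording below (an illustrative sketch, not part of this patch): on the supported machines ESMFBIN_PATH is deliberately left unset so the wrapper can pick up the ESMF binaries from the modules it loads; on an unsupported machine the equivalent manual setup would look roughly like

    $ export ESMFBIN_PATH=/path/to/esmf/7.0.0/bin    (hypothetical install path)
    $ ./create_ESMF_map.sh ...                       (arguments as documented in the README usage text)

with MPIEXEC set in the same way if the local mpirun launcher goes by a different name.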
--- tools/mapping/gen_mapping_files/README | 4 ++-- .../gen_ESMF_mapping_file/.gitignore | 2 ++ .../gen_ESMF_mapping_file/README | 4 ++-- .../gen_ESMF_mapping_file/create_ESMF_map.sh | 24 +++++++++++++------ 4 files changed, 23 insertions(+), 11 deletions(-) create mode 100644 tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/.gitignore diff --git a/tools/mapping/gen_mapping_files/README b/tools/mapping/gen_mapping_files/README index cd60a919454d..a9594191d3a3 100644 --- a/tools/mapping/gen_mapping_files/README +++ b/tools/mapping/gen_mapping_files/README @@ -120,8 +120,8 @@ Note: if rtm is specified and lnd is not, then this tool will You can also set the following env variables: ESMFBIN_PATH - Path to ESMF binaries - (Leave unset on yellowstone and caldera and the tool - will be loaded from modules) + (Leave unset on cheyenne, yellowstone, and caldera and the + tool will be loaded from modules) MPIEXEC ------ Name of mpirun executable (default is mpirun.lsf on yellowstone and caldera; if you run interactively on yellowstone, mpi is not used) diff --git a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/.gitignore b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/.gitignore new file mode 100644 index 000000000000..4e025b24ca32 --- /dev/null +++ b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/.gitignore @@ -0,0 +1,2 @@ +*.Log +*.nc diff --git a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README index c0f157a05c01..e0741bdb2d5c 100644 --- a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README +++ b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README @@ -82,8 +82,8 @@ where You can also set the following env variables: ESMFBIN_PATH - Path to ESMF binaries - (Leave unset on yellowstone and caldera and the tool - will be loaded from modules) + (Leave unset on cheyenne, yellowstone, and caldera and the + tool will be loaded from modules) MPIEXEC ------ Name of mpirun executable (default is mpirun.lsf on yellowstone and caldera; if you run interactively on yellowstone, mpi is not used) diff --git a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh index 012fbe9e417d..60448bf1c8d1 100755 --- a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh +++ b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh @@ -80,8 +80,8 @@ usage() { echo '' echo 'You can also set the following env variables:' echo ' ESMFBIN_PATH - Path to ESMF binaries ' - echo ' (Leave unset on yellowstone/caldera/pronghorn and the tool' - echo ' will be loaded from modules)' + echo ' (Leave unset on cheyenne/yellowstone/caldera/pronghorn' + echo ' and the tool will be loaded from modules)' echo ' MPIEXEC ------ Name of mpirun executable' echo ' (default is mpirun.lsf on yellowstone/caldera/pronghorn; if' echo ' you run interactively on yellowstone, mpi is not used)' @@ -225,6 +225,9 @@ if [ $MACH == "UNSET" ]; then ys* ) MACH="yellowstone" ;; + cheyenne* ) + MACH="cheyenne" + ;; geyser* ) MACH="geyser" ;; @@ -244,10 +247,13 @@ if [ $MACH == "UNSET" ]; then fi # Machine specific settings: -# 1) can not run in parallel interactively on yellowstone +# 1) can not run in parallel interactively on yellowstone or cheyenne if [ $MACH == "yellowstone" ] && [ $interactive == "YES" ]; then serial="TRUE" fi +if [ $MACH == "cheyenne" ] && [ $interactive == "YES" ]; then + serial="TRUE" +fi # 2) 
jaguar requires additional environment var if [ $MACH == "jaguar" ] && [ -z "$REGRID_PROC" ]; then REGRID_PROC=8 @@ -310,22 +316,26 @@ fi case $MACH in ## yellowstone, geyser, caldera, or pronghorn - "yellowstone" | "geyser" | "caldera" | "pronghorn" ) + "cheyenne" | "yellowstone" | "geyser" | "caldera" | "pronghorn" ) # From tcsh, script will not find module command # So check to see if module works, otherwise source an init file module list > /dev/null 2>&1 || source /etc/profile.d/modules.sh module purge module load intel module load nco - module load esmf + if [ $MACH == "cheyenne" ]; then + module load esmf_libs/7.0.0 + else + module load esmf + fi if [ $serial == "TRUE" ]; then - module load esmf-6.3.0rp1-ncdfio-uni-O + module load esmf-7.0.0-ncdfio-uni-O if [ -z "$MPIEXEC" ]; then MPIEXEC="" fi else - module load esmf-6.3.0rp1-ncdfio-mpi-O + module load esmf-7.0.0-ncdfio-mpi-O if [ -z "$MPIEXEC" ]; then MPIEXEC="mpirun.lsf" fi From 11527f9a1c52942cabf874b26b900ec424d721a0 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 20 Apr 2017 11:13:50 -0600 Subject: [PATCH 099/219] Errput may be empty string or None In either case, you want to inform the user using output instead of errput. --- scripts/lib/CIME/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index a3f7da17ddae..07186476228f 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -259,7 +259,7 @@ def run_cmd_no_fail(cmd, input_str=None, from_dir=None, verbose=None, if stat != 0: # If command produced no errput, put output in the exception since we # have nothing else to go on. - errput = output if errput == "" else errput + errput = output if not errput else errput expect(False, "Command: '%s' failed with error '%s'%s" % (cmd, errput, "" if from_dir is None else " from dir '%s'" % from_dir)) From 79acf6016a5906de7116e5f7d1014be4d5020051 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Thu, 20 Apr 2017 11:19:39 -0600 Subject: [PATCH 100/219] Add Support for += in Namelists Namelists.pm had the ability to parse += in namelist files. This adds the support for += into the python version. Fixes #839 --- scripts/lib/CIME/namelist.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index d91a31184c32..2f4366a39f89 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -1440,12 +1440,15 @@ def _eat_comment(self): self._advance() return True - def _expect_char(self, chars): + def _expect_char(self, chars, RETURN=False): """Raise an error if the wrong character is present. Does not return anything, but raises a `_NamelistParseError` if `chars` does not contain the character at the current position. 
+ The RETURN optional is used to allow for checking of consecutive + characters such as '+=' + >>> x = _NamelistParser('ab') >>> x._expect_char('a') >>> x._advance() @@ -1455,13 +1458,17 @@ def _expect_char(self, chars): _NamelistParseError: Error in parsing namelist: expected 'a' but found 'b' >>> x._expect_char('ab') """ - if self._curr() not in chars: + if self._curr() not in chars and not RETURN: if len(chars) == 1: char_description = repr(str(chars)) else: char_description = "one of the characters in %r" % str(chars) raise _NamelistParseError("expected %s but found %r" % (char_description, str(self._curr()))) + elif self._curr() in chars and RETURN: + return True + elif self._curr() not in chars and RETURN: + return False def _parse_namelist_group_name(self): r"""Parses and returns a namelist group name at the current position. @@ -1536,7 +1543,7 @@ def _parse_variable_name(self, allow_equals=True): _NamelistParseError: Error in parsing namelist: '' is not a valid variable name """ old_pos = self._pos - separators = (' ', '\n', '=') if allow_equals else (' ', '\n') + separators = (' ', '\n', '=', '+') if allow_equals else (' ', '\n') while self._curr() not in separators: self._advance() text = self._text[old_pos:self._pos] @@ -1964,10 +1971,15 @@ def _parse_name_and_values(self, allow_eof_end=False): SystemExit: ERROR: Too many values for array foo(1:2) >>> _NamelistParser("foo=1,")._parse_name_and_values(allow_eof_end=True) (u'foo', [u'1', u'']) + >>> _NamelistParser("foo+=1")._parse_name_and_values(allow_eof_end=True) + (u'foo', [u'1']) """ name = self._parse_variable_name() self._eat_whitespace() + # check to see if we have a "+=" + if self._expect_char("+", RETURN=True): + self._advance() self._expect_char("=") try: self._advance() From 9329b159275a261b24a3f421f5e7ababbbfd8bee Mon Sep 17 00:00:00 2001 From: Jason Sarich Date: Thu, 20 Apr 2017 13:20:58 -0500 Subject: [PATCH 101/219] Fix bug where LockedFiles weren't checked if there was a dot in filepath Fixes issue #1322 --- scripts/lib/CIME/check_lockedfiles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/check_lockedfiles.py b/scripts/lib/CIME/check_lockedfiles.py index ee99a37398b0..312437143888 100644 --- a/scripts/lib/CIME/check_lockedfiles.py +++ b/scripts/lib/CIME/check_lockedfiles.py @@ -88,7 +88,7 @@ def check_lockedfiles(caseroot=None): for lfile in lockedfiles: fpart = os.path.basename(lfile) # ignore files used for tests such as env_mach_pes.ERP1.xml by looking for extra dots in the name - if lfile.count('.') > 1: + if fpart.count('.') > 1: continue cfile = os.path.join(caseroot, fpart) if os.path.isfile(cfile): From 1c1058118e6e73ed647ea4753d7a16af992847c3 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 12:55:52 -0600 Subject: [PATCH 102/219] Steps toward getting unit tests running on hobart Links are now dying with an error that it can't find -lifcore --- config/cesm/machines/config_compilers.xml | 1 + src/externals/CMake/CIME_utils.cmake | 2 +- tools/unit_testing/run_tests.py | 5 +++++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 8ee48fc3c087..2abaae9853af 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -690,6 +690,7 @@ using a fortran linker. 
-mkl=cluster + /fs/cgd/csm/tools/pFUnit/pFUnit3.2.8_hobart_Intel15.0.2_noMPI_noOpenMP diff --git a/src/externals/CMake/CIME_utils.cmake b/src/externals/CMake/CIME_utils.cmake index 88377ad2920f..82867033e865 100644 --- a/src/externals/CMake/CIME_utils.cmake +++ b/src/externals/CMake/CIME_utils.cmake @@ -37,7 +37,7 @@ set(CMAKE_COLOR_MAKEFILE "${USE_COLOR}") list(APPEND CMAKE_MODULE_PATH "../pio2/cmake") set(CMAKE_C_FLAGS "${CPPDEFS} ${CFLAGS}") set(CMAKE_Fortran_FLAGS "${CPPDEFS} ${FFLAGS}") -set (CMAKE_EXE_LINKER_FLAGS ${LDFLAGS}) +set(CMAKE_EXE_LINKER_FLAGS ${LDFLAGS} ${SLIBS}) include(Compilers) diff --git a/tools/unit_testing/run_tests.py b/tools/unit_testing/run_tests.py index ffe8b4b57ea9..422ef15765fc 100755 --- a/tools/unit_testing/run_tests.py +++ b/tools/unit_testing/run_tests.py @@ -330,6 +330,11 @@ def _main(): os.environ["compile_threaded"] = "false" os.environ["UNIT_TEST_HOST"] = socket.gethostname() + if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ: + # The CMake Netcdf find utility that we use (from pio2) seems to key off + # of the environment variable NETCDF, but not NETCDF_PATH + logger.info("Setting NETCDF environment variable: %s"%os.environ["NETCDF_PATH"]) + os.environ["NETCDF"] = os.environ["NETCDF_PATH"] if not use_mpi: mpirun_command = "" From 7fa83de092a6969886ebbd95107946b3589ac224 Mon Sep 17 00:00:00 2001 From: Michael Levy Date: Thu, 20 Apr 2017 13:32:55 -0600 Subject: [PATCH 103/219] Forgot to update map_field in previous commits The map_field utility INSTALL file is up to date and the Makefile is more closely aligned with that of gen_domain --- tools/mapping/map_field/INSTALL | 21 ++++++++++----------- tools/mapping/map_field/src/Makefile | 26 +++++++++++++++++++------- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/tools/mapping/map_field/INSTALL b/tools/mapping/map_field/INSTALL index bf9508417f2a..a244892d2969 100644 --- a/tools/mapping/map_field/INSTALL +++ b/tools/mapping/map_field/INSTALL @@ -2,17 +2,16 @@ HOW TO BUILD ============ -Prior to building, you must make sure $CIMEROOT is set. - (1) $ cd src -(2) $ $CIMEROOT/tools/configure macros-format Makefile -(3) Bash users: - $ . ./.env_mach_specific.sh - csh users: - $ source ./.env_mach_specific.csh -(4) $ gmake +(2) $ ../../../configure --macros-format Makefile --mpilib mpi-serial +Bash users: +(3) $ (. ./.env_mach_specific.sh ; gmake) +csh users: +(3) $ (source ./.env_mach_specific.csh ; gmake) -Note: in the second step, replace [machine name] with the machine you are -building on. Also, some machines have dedicated build nodes, so you might need -to SSH to another node before the 'gmake' step. +Note: in the second step, you may need to include "--machine [machine name]", +where [machine name] is the name of the machine you are building on. In most +cases configure can figure that out on its own, but if you get an error that is +the first fix to try. Also, some machines have dedicated build nodes, so you +might need to SSH to another node before the 'gmake' step. diff --git a/tools/mapping/map_field/src/Makefile b/tools/mapping/map_field/src/Makefile index bc2dbd56e8ea..27b53b6128fa 100644 --- a/tools/mapping/map_field/src/Makefile +++ b/tools/mapping/map_field/src/Makefile @@ -29,14 +29,26 @@ null := CURDIR = . 
EXENAME = ../map_field RM = rm -MACFILE = Macros.make +MACROS = Macros.make default: $(EXENAME) -$(MACFILE): - @echo "use the configure script located in the Machines directory to create the Makefile $(MACFILE) file" +$(MACROS): + @echo "use the configure script located in the Machines directory to create the Makefile $(MACROS) file" + +# Do not include Macros for clean or distclean targets +USE_MACROS=TRUE +ifeq ($(MAKECMDGOALS),clean) + USE_MACROS=FALSE +endif +ifeq ($(MAKECMDGOALS),distcleanclean) + USE_MACROS=FALSE +endif + +ifeq ($(USE_MACROS),TRUE) + -include $(MACROS) +endif --include $(MACFILE) # Check for the netcdf library and include directories ifdef NETCDF_PATH LIB_NETCDF:=$(NETCDF_PATH)/lib @@ -97,7 +109,7 @@ else FPPDEFS := $(CPPDEFS) endif #Primary Target: build the tool -all: $(EXENAME) $(MACFILE) +all: $(EXENAME) $(MACROS) OBJS := map_field.o @@ -133,7 +145,7 @@ $(EXENAME): $(OBJS) clean: $(RM) -f $(OBJS) $(EXENAME) *.mod -veryclean: - $(RM) -f $(OBJS) $(EXENAME) *.mod $(MACFILE) .env_mach* env_mach* +distclean: clean + ../../../configure --clean map_field.o : map_field.F90 From a403d2e56d63e981bfdb2d7c83622e37013a9d0e Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 13:42:07 -0600 Subject: [PATCH 104/219] Build CMAKE_EXE_LINKER_FLAGS correctly This avoids adding a semicolon --- src/externals/CMake/CIME_utils.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/externals/CMake/CIME_utils.cmake b/src/externals/CMake/CIME_utils.cmake index 82867033e865..bd0f254b1d8f 100644 --- a/src/externals/CMake/CIME_utils.cmake +++ b/src/externals/CMake/CIME_utils.cmake @@ -37,7 +37,7 @@ set(CMAKE_COLOR_MAKEFILE "${USE_COLOR}") list(APPEND CMAKE_MODULE_PATH "../pio2/cmake") set(CMAKE_C_FLAGS "${CPPDEFS} ${CFLAGS}") set(CMAKE_Fortran_FLAGS "${CPPDEFS} ${FFLAGS}") -set(CMAKE_EXE_LINKER_FLAGS ${LDFLAGS} ${SLIBS}) +set(CMAKE_EXE_LINKER_FLAGS "${LDFLAGS} ${SLIBS}") include(Compilers) From 617930115b79fbb9eefdb567d35ffaca6cff3f49 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 20 Apr 2017 15:10:03 -0600 Subject: [PATCH 105/219] remove the cprnc.out files prior to running tests --- scripts/lib/CIME/SystemTests/system_tests_common.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/lib/CIME/SystemTests/system_tests_common.py b/scripts/lib/CIME/SystemTests/system_tests_common.py index d0eba6383a58..3dc944966271 100644 --- a/scripts/lib/CIME/SystemTests/system_tests_common.py +++ b/scripts/lib/CIME/SystemTests/system_tests_common.py @@ -197,6 +197,10 @@ def run_indv(self, suffix="base", st_archive=False): run_type = self._case.get_value("RUN_TYPE") rest_option = self._case.get_value("REST_OPTION") rest_n = self._case.get_value("REST_N") + rundir = self._case.get_value("RUNDIR") + # remove any cprnc output leftover from previous runs + for compout in glob.iglob(os.path.join(rundir,"*.cprnc.out")): + os.remove(compout) infostr = "doing an %d %s %s test" % (stop_n, stop_option,run_type) if rest_option == "none" or rest_option == "never": From a01ec185353238f1994cd5c743752646b23cc00b Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Thu, 20 Apr 2017 15:12:27 -0600 Subject: [PATCH 106/219] Fix CMakeLists.txt in examples --- tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt | 4 +++- .../Examples/interpolate_1d/tests/CMakeLists.txt | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt b/tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt index 
f7bcbf92b2fc..06b4b9ada401 100644 --- a/tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt +++ b/tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt @@ -1,7 +1,9 @@ cmake_minimum_required(VERSION 2.8) -project(circle_area_tests Fortran C) list(APPEND CMAKE_MODULE_PATH ${CIME_CMAKE_MODULE_DIRECTORY}) +include(CIME_initial_setup) + +project(circle_area_tests Fortran C) include(CIME_utils) diff --git a/tools/unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt b/tools/unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt index 378330073d27..e1747bb812b7 100644 --- a/tools/unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt +++ b/tools/unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt @@ -1,7 +1,10 @@ cmake_minimum_required(VERSION 2.8) -project(interpolate_1d_tests Fortran C) list(APPEND CMAKE_MODULE_PATH ${CIME_CMAKE_MODULE_DIRECTORY}) +include(CIME_initial_setup) + +project(interpolate_1d_tests Fortran C) + include(CIME_utils) add_subdirectory(../src interpolate_1d) From 5b5a2c5664e200a319ca32432d451bcc398a1850 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Thu, 20 Apr 2017 15:30:29 -0600 Subject: [PATCH 107/219] Add Support of true and false in Namelist The Namelist.pm only allowed for .false. or .true. in namelist files. I have fixed the regex to allow for dots or no dots. Fixes #1283 --- utils/perl5lib/Build/Namelist.pm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/perl5lib/Build/Namelist.pm b/utils/perl5lib/Build/Namelist.pm index 5c222c1fab18..b75e5f2be0ed 100644 --- a/utils/perl5lib/Build/Namelist.pm +++ b/utils/perl5lib/Build/Namelist.pm @@ -627,8 +627,8 @@ my $valint = "[+-]?[0-9]+"; my $valint_repeat = "${valint}\\*$valint"; # Logical data. -my $vallogical1 = "\\.[Tt][Rr][Uu][Ee]\\."; -my $vallogical2 = "\\.[Ff][Aa][Ll][Ss][Ee]\\."; +my $vallogical1 = "\\.?[Tt][Rr][Uu][Ee]\\.?"; +my $vallogical2 = "\\.?[Ff][Aa][Ll][Ss][Ee]\\.?"; my $vallogical = "$vallogical1|$vallogical2"; my $vallogical_repeat = "${valint}\\*$vallogical1|${valint}\\*$vallogical2"; From dcf6dc90fb41dcccf645b520c7552d6183f7b0b4 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Fri, 21 Apr 2017 08:28:59 -0600 Subject: [PATCH 108/219] Added Support for [FfTt] in Namelist.pm Added additional support for the lagical F, f, T, and t in the perl module for namelist variables. --- utils/perl5lib/Build/Namelist.pm | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/perl5lib/Build/Namelist.pm b/utils/perl5lib/Build/Namelist.pm index b75e5f2be0ed..cd3643067d26 100644 --- a/utils/perl5lib/Build/Namelist.pm +++ b/utils/perl5lib/Build/Namelist.pm @@ -629,7 +629,8 @@ my $valint_repeat = "${valint}\\*$valint"; # Logical data. my $vallogical1 = "\\.?[Tt][Rr][Uu][Ee]\\.?"; my $vallogical2 = "\\.?[Ff][Aa][Ll][Ss][Ee]\\.?"; -my $vallogical = "$vallogical1|$vallogical2"; +my $vallogical3 = "[FfTt]"; +my $vallogical = "$vallogical1|$vallogical2|$vallogical3"; my $vallogical_repeat = "${valint}\\*$vallogical1|${valint}\\*$vallogical2"; # Real data. From a5cc0b0cbf7a5a944dbb9b7912a29bd3ac0e48bb Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Fri, 21 Apr 2017 09:27:59 -0600 Subject: [PATCH 109/219] In cmake macros file, always strip leading and trailing whitespace This isn't necessarily needed for variables other than the compiler, but Jim Edwards suggested doing this to keep things simple and because there *may* be other variables that need it. He thinks it shouldn't be a problem to do this for all variables. 
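A minimal Python sketch of the behavior this commit settles on, for illustration only (the real code is in scripts/lib/CIME/BuildTools/cmakemacroswriter.py as shown in the diff that follows; transform_value and set_variable here are simplified stand-ins for the class methods):

    # Illustrative sketch: every value written to the CMake Macros file gets
    # leading and trailing whitespace stripped, not just the compiler variables.
    def transform_value(value):
        # e.g. " mpif90 " -> "mpif90"; harmless for values with no extra whitespace
        return value.strip()

    def set_variable(name, value):
        return 'set({} "{}")'.format(name, transform_value(value))

    print(set_variable("MPIFC", "  mpif90 "))   # set(MPIFC "mpif90")
    print(set_variable("FFLAGS", "-O2 -g"))     # set(FFLAGS "-O2 -g")
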
--- .../lib/CIME/BuildTools/cmakemacroswriter.py | 36 +++++-------------- 1 file changed, 8 insertions(+), 28 deletions(-) diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py index 0041a921b80c..a1789d983876 100644 --- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py +++ b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py @@ -89,7 +89,7 @@ def set_variable(self, name, value): >>> s.getvalue() u'set(foo "bar")\\n' """ - value_transformed = self._transform_value(name, value) + value_transformed = self._transform_value(value) self.write_line("set(" + name + ' "' + value_transformed + '")') def start_ifeq(self, left, right): @@ -119,41 +119,21 @@ def end_ifeq(self): self.indent_left() self.write_line("endif()") - def _transform_value(self, name, value): + def _transform_value(self, value): """Some elements need their values transformed in some way for CMake to handle them properly. This method does those transformations. Args: - - name (str): name of element - value (str): value of element Returns transformed value """ - value_transformed = value - if self._element_needs_whitespace_removal(name): - value_transformed = value_transformed.strip() + # Not all variables need leading & trailing whitespace removed, but some + # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC, + # SCXX, SFC) are only handled correctly if leading & trailing whitespace + # is removed. It doesn't seem to hurt to remove whitespace from all + # variables. + value_transformed = value.strip() return value_transformed - - def _element_needs_whitespace_removal(self, name): - """Returns True if the given element needs whitespace removed - - Args: - - name (str): name of element - """ - - # These compiler variables are only handled correctly if white space is removed - vars_that_need_whitespace_removal = ( - 'MPICC', - 'MPICXX', - 'MPIFC', - 'SCC', - 'SCXX', - 'SFC' - ) - - if name in vars_that_need_whitespace_removal: - return True - else: - return False From 00ab9eed8d19142f4e67d2bf24473ea84931a4de Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Fri, 31 Mar 2017 15:35:41 -0600 Subject: [PATCH 110/219] Rewrite testreporter into python. The perl version was having issues handling web posts differently on different systems. Fix issue where testreport failed to be copied to testing directory. Test suite: scripts_regression_tests.py. Populated testdata base for alpha06g on cheyenne and hobart Test baseline: Test namelist changes: Test status: bit for bit Fixes #1292 User interface changes?: Code review: --- scripts/Tools/testreporter.py | 240 +++++++++++++++++++++++++++++ scripts/lib/CIME/test_scheduler.py | 15 +- 2 files changed, 250 insertions(+), 5 deletions(-) create mode 100755 scripts/Tools/testreporter.py diff --git a/scripts/Tools/testreporter.py b/scripts/Tools/testreporter.py new file mode 100755 index 000000000000..d7e136d172f2 --- /dev/null +++ b/scripts/Tools/testreporter.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python + +""" + +Simple script to populate CESM test database with test results. This can be +run with out any knowledge of CIME. 
+ +""" + +import os +import glob +import xml.etree.ElementTree as ET +from xml.dom import minidom +import HTMLParser +import urllib +import argparse + + + +# Parse command line options + +parser = argparse.ArgumentParser(description='Arguements for testreporter') +parser.add_argument("--tagname", + help="Name of the tag being tested.") +parser.add_argument("--testid", + help="Test id, ie c2_0_a6g,c2_0_b6g.") +parser.add_argument("--testroot", + help="Root directory for tests to populate the database.") +parser.add_argument("--testtype", + help="Type of test, prealpha or prebeta.") +parser.add_argument("--dryrun",action="store_true", + help="Do a dry run, database will not be populated.") +parser.add_argument("--dumpxml",action="store_true", + help="Dump XML test results to sceen.") +args = parser.parse_args() + +# Fill in values needed from xml files (COMPILER, MPILIB, MACH, BASELINE_NAME_CMP. + +os.chdir(args.testroot) + +xml_file=glob.glob("*"+args.testid+"/env_build.xml") +root = ET.parse(xml_file[0]).getroot() +for child in root.iter('group'): + for entry in child.iter('entry'): + id = entry.get('id') + value = entry.get('value') + if id == "COMPILER": + compiler=value + if id == "MPILIB": + mpilib = value + +xml_file=glob.glob("*"+args.testid+"/env_case.xml") +root = ET.parse(xml_file[0]).getroot() +for child in root.iter('group'): + for entry in child.iter('entry'): + id = entry.get('id') + value = entry.get('value') + if id == "MACH": + machine=value + +xml_file=glob.glob("*"+args.testid+"/env_test.xml") +root = ET.parse(xml_file[0]).getroot() +for child in root.iter('group'): + for entry in child.iter('entry'): + id = entry.get('id') + value = entry.get('value') + if id == "BASELINE_NAME_CMP": + baseline=value + + +# +# Create XML +# + +testrecord = ET.Element("testrecord") +tag_name = ET.SubElement(testrecord,'tag_name').text=args.tagname +mach = ET.SubElement(testrecord,'mach').text=machine +compiler = ET.SubElement(testrecord,'compiler',attrib={"version":""}).text=compiler +mpilib = ET.SubElement(testrecord,'mpilib',attrib={"version":""}).text=mpilib +testroot = ET.SubElement(testrecord,'testroot').text=args.testroot +testtype = ET.SubElement(testrecord,'testtype').text=args.testtype +baselinetag = ET.SubElement(testrecord,'baselinetag').text= baseline +# +# Create lists on tests based on the testid in the testroot directory. +# +test_names=glob.glob("*"+args.testid) +# +# Loop over all tests and parse the test results +# +test_status={} +for test_name in test_names: + if test_name == "cs.status."+args.testid: + continue + test_status[test_name,'COMMENT']="" + test_status[test_name,'BASELINE']='----' + test_status[test_name,'MEMCOMP']='----' + test_status[test_name,'MEMLEAK']='----' + test_status[test_name,'NLCOMP']='----' + test_status[test_name,'STATUS']='----' + test_status[test_name,'TPUTCOMP']='----' + # + # Check to see if TestStatus is present, if not then continue + # I might want to set the status to fail + # + try: + lines = [line.rstrip('\n') for line in open(test_name+"/TestStatus")] + except: + test_status[test_name,'STATUS']="FAIL" + test_status[test_name,'COMMENT']="TestStatus missing. " + continue + # + # Loop over each line of TestStatus, and check for different types of failures. 
+ # + for line in lines: + if "NLCOMP" in line: + test_status[test_name,'NLCOMP']=line[0:4] + if "MEMLEAK" in line: + test_status[test_name,'MEMLEAK']=line[0:4] + if "MEMCOMP" in line: + test_status[test_name,'MEMCOMP']=line[0:4] + if "BASELINE" in line: + test_status[test_name,'BASELINE']=line[0:4] + if "TPUTCOMP" in line: + test_status[test_name,'TPUTCOMP']=line[0:4] + if "INIT" in line: + test_status[test_name,'INIT']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="SFAIL" + test_status[test_name,'COMMENT']+="INIT fail! " + break + if "CREATE_NEWCASE" in line: + test_status[test_name,'CREATE_NEWCASE']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="SFAIL" + test_status[test_name,'COMMENT']+="CREATE_NEWCASE fail! " + break + if "XML" in line: + test_status[test_name,'XML']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="SFAIL" + test_status[test_name,'COMMENT']+="XML fail! " + break + if "SETUP" in line: + test_status[test_name,'SETUP']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="SFAIL" + test_status[test_name,'COMMENT']+="SETUP fail! " + break + if "SHAREDLIB_BUILD" in line: + test_status[test_name,'SHAREDLIB_BUILD']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="CFAIL" + test_status[test_name,'COMMENT']+="SHAREDLIB_BUILD fail! " + break + if "MODEL_BUILD" in line: + test_status[test_name,'MODEL_BUILD']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="CFAIL" + test_status[test_name,'COMMENT']+="MODEL_BUILD fail! " + break + if "SUBMIT" in line: + test_status[test_name,'STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'COMMENT']+="SUBMIT fail! " + break + if "RUN" in line: + test_status[test_name,'STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'COMMENT']+="RUN fail! " + break + if "COMPARE_base_rest" in line: + test_status[test_name,'STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'COMMENT']+="Restart fail! " + break + if "COMPARE_base_hybrid" in line: + test_status[test_name,'STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'COMMENT']+="Hybrid fail! " + break + + # + # Do not include time comments. 
Just a preference to have cleaner comments in the test database + # + try: + if 'time=' not in line: + test_status[test_name,'COMMENT']+=line.split(' ',3)[3]+' ' + except: + pass + + # + # File in the xml with the test results + # + tests = ET.Element('tests',attrib={"testname":test_name}) + testrecord.append(tests) + category=ET.SubElement(tests,'category',attrib={"name":"casestatus"}) + category=ET.SubElement(tests,'category',attrib={"name":"comment"}).text= test_status[test_name,'COMMENT'] + category=ET.SubElement(tests,'category',attrib={"name":"compare"}).text= test_status[test_name,'BASELINE'] + category=ET.SubElement(tests,'category',attrib={"name":"memcomp"}).text= test_status[test_name,'MEMCOMP'] + category=ET.SubElement(tests,'category',attrib={"name":"memleak"}).text=test_status[test_name,'MEMLEAK'] + category=ET.SubElement(tests,'category',attrib={"name":"nlcomp"}).text= test_status[test_name,'NLCOMP'] + category=ET.SubElement(tests,'category',attrib={"name":"status"}).text= test_status[test_name,'STATUS'] + category=ET.SubElement(tests,'category',attrib={"name":"tputcomp"}).text= test_status[test_name,'TPUTCOMP'] + + +# +# Convert XML to a string +# +xmlstr = ET.tostring(testrecord,method="xml",encoding="UTF-8") + +# +# Make the XML string human readable and print it out +# +xml=minidom.parseString(xmlstr) +testXML = xml.toprettyxml(encoding="UTF-8") +# +# Dump xml to the screen. +# +if args.dumpxml: + print testXML + +# +# Prompt for username and password, then post the XML string to the test database website +# +if not args.dryrun: + username=raw_input("Username:") + os.system("stty -echo") + password=raw_input("Password:") + os.system("stty echo") + params={'username':username,'password':password,'testXML':testXML} + url="https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" + params = urllib.urlencode(params) + f = urllib.urlopen(url, params) + # + # Print any messages from the post command + # + print f.read() + print f.code + + diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index dd97d93d301b..9d4a1f24d2f1 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -774,11 +774,16 @@ def _setup_cs_files(self): os.chmod(cs_submit_file, os.stat(cs_submit_file).st_mode | stat.S_IXUSR | stat.S_IXGRP) - if get_model == "cesm": - testreporter = os.path.join(self._test_root,"testreporter.pl") - shutil.copy(os.path.join(self._cime_root,"scripts","Testing","testreporter.pl"), - testreporter) - os.chmod(testreporter, os.stat(testreporter).st_mode | stat.S_IXUSR | stat.S_IXGRP) + if get_model() == "cesm": + template_file = os.path.join(python_libs_root, "testreporter.template") + template = open(template_file, "r").read() + template = template.replace("", + os.path.join(self._cime_root, "scripts", "Tools")) + testreporter_file = os.path.join(self._test_root, "testreporter") + with open(testreporter_file, "w") as fd: + fd.write(template) + os.chmod(testreporter_file, os.stat(testreporter_file).st_mode + | stat.S_IXUSR | stat.S_IXGRP) except Exception as e: logger.warning("FAILED to set up cs files: %s" % str(e)) From 01fd3f7311157c80df113d81113054dc4e83dce1 Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Mon, 3 Apr 2017 13:24:17 -0600 Subject: [PATCH 111/219] Update indenting for testreporter.py Test suite: Test baseline: Test namelist changes: Test status: [bit for bit, roundoff, climate changing] Fixes [CIME Github issue #] User interface changes?: Code review: --- scripts/Tools/testreporter.py | 
140 +++++++++++++++++----------------- 1 file changed, 70 insertions(+), 70 deletions(-) diff --git a/scripts/Tools/testreporter.py b/scripts/Tools/testreporter.py index d7e136d172f2..74735fe56873 100755 --- a/scripts/Tools/testreporter.py +++ b/scripts/Tools/testreporter.py @@ -41,30 +41,30 @@ xml_file=glob.glob("*"+args.testid+"/env_build.xml") root = ET.parse(xml_file[0]).getroot() for child in root.iter('group'): - for entry in child.iter('entry'): - id = entry.get('id') - value = entry.get('value') - if id == "COMPILER": - compiler=value - if id == "MPILIB": - mpilib = value + for entry in child.iter('entry'): + test_id = entry.get('id') + value = entry.get('value') + if test_id == "COMPILER": + compiler=value + if test_id == "MPILIB": + mpilib = value xml_file=glob.glob("*"+args.testid+"/env_case.xml") root = ET.parse(xml_file[0]).getroot() for child in root.iter('group'): - for entry in child.iter('entry'): - id = entry.get('id') - value = entry.get('value') - if id == "MACH": + for entry in child.iter('entry'): + test_id = entry.get('id') + value = entry.get('value') + if test_id == "MACH": machine=value xml_file=glob.glob("*"+args.testid+"/env_test.xml") root = ET.parse(xml_file[0]).getroot() for child in root.iter('group'): - for entry in child.iter('entry'): - id = entry.get('id') - value = entry.get('value') - if id == "BASELINE_NAME_CMP": + for entry in child.iter('entry'): + test_id = entry.get('id') + value = entry.get('value') + if test_id == "BASELINE_NAME_CMP": baseline=value @@ -113,80 +113,80 @@ # for line in lines: if "NLCOMP" in line: - test_status[test_name,'NLCOMP']=line[0:4] + test_status[test_name,'NLCOMP']=line[0:4] if "MEMLEAK" in line: - test_status[test_name,'MEMLEAK']=line[0:4] + test_status[test_name,'MEMLEAK']=line[0:4] if "MEMCOMP" in line: - test_status[test_name,'MEMCOMP']=line[0:4] + test_status[test_name,'MEMCOMP']=line[0:4] if "BASELINE" in line: - test_status[test_name,'BASELINE']=line[0:4] + test_status[test_name,'BASELINE']=line[0:4] if "TPUTCOMP" in line: - test_status[test_name,'TPUTCOMP']=line[0:4] + test_status[test_name,'TPUTCOMP']=line[0:4] if "INIT" in line: - test_status[test_name,'INIT']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="SFAIL" - test_status[test_name,'COMMENT']+="INIT fail! " - break + test_status[test_name,'INIT']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="SFAIL" + test_status[test_name,'COMMENT']+="INIT fail! " + break if "CREATE_NEWCASE" in line: - test_status[test_name,'CREATE_NEWCASE']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="SFAIL" - test_status[test_name,'COMMENT']+="CREATE_NEWCASE fail! " - break + test_status[test_name,'CREATE_NEWCASE']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="SFAIL" + test_status[test_name,'COMMENT']+="CREATE_NEWCASE fail! " + break if "XML" in line: - test_status[test_name,'XML']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="SFAIL" - test_status[test_name,'COMMENT']+="XML fail! " - break + test_status[test_name,'XML']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="SFAIL" + test_status[test_name,'COMMENT']+="XML fail! " + break if "SETUP" in line: - test_status[test_name,'SETUP']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="SFAIL" - test_status[test_name,'COMMENT']+="SETUP fail! 
" - break + test_status[test_name,'SETUP']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="SFAIL" + test_status[test_name,'COMMENT']+="SETUP fail! " + break if "SHAREDLIB_BUILD" in line: - test_status[test_name,'SHAREDLIB_BUILD']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="CFAIL" - test_status[test_name,'COMMENT']+="SHAREDLIB_BUILD fail! " - break + test_status[test_name,'SHAREDLIB_BUILD']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="CFAIL" + test_status[test_name,'COMMENT']+="SHAREDLIB_BUILD fail! " + break if "MODEL_BUILD" in line: - test_status[test_name,'MODEL_BUILD']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="CFAIL" - test_status[test_name,'COMMENT']+="MODEL_BUILD fail! " - break + test_status[test_name,'MODEL_BUILD']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'STATUS']="CFAIL" + test_status[test_name,'COMMENT']+="MODEL_BUILD fail! " + break if "SUBMIT" in line: - test_status[test_name,'STATUS']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'COMMENT']+="SUBMIT fail! " - break + test_status[test_name,'STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'COMMENT']+="SUBMIT fail! " + break if "RUN" in line: - test_status[test_name,'STATUS']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'COMMENT']+="RUN fail! " - break + test_status[test_name,'STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'COMMENT']+="RUN fail! " + break if "COMPARE_base_rest" in line: - test_status[test_name,'STATUS']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'COMMENT']+="Restart fail! " - break + test_status[test_name,'STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'COMMENT']+="Restart fail! " + break if "COMPARE_base_hybrid" in line: - test_status[test_name,'STATUS']=line[0:4] - if line[0:4] == "FAIL": - test_status[test_name,'COMMENT']+="Hybrid fail! " - break + test_status[test_name,'STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status[test_name,'COMMENT']+="Hybrid fail! " + break # # Do not include time comments. Just a preference to have cleaner comments in the test database # try: - if 'time=' not in line: - test_status[test_name,'COMMENT']+=line.split(' ',3)[3]+' ' + if 'time=' not in line: + test_status[test_name,'COMMENT']+=line.split(' ',3)[3]+' ' except: - pass + pass # # File in the xml with the test results @@ -217,7 +217,7 @@ # Dump xml to the screen. 
# if args.dumpxml: - print testXML + print testXML # # Prompt for username and password, then post the XML string to the test database website From 8abef1c5714b5c9efb9648d0d78a2229c1255a3d Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Mon, 17 Apr 2017 15:25:07 -0600 Subject: [PATCH 112/219] Merge in esmci/master Test suite: Test baseline: Test namelist changes: Test status: [bit for bit, roundoff, climate changing] Fixes [CIME Github issue #] User interface changes?: Code review: --- scripts/Tools/testreporter.py | 9 +- scripts/lib/CIME/XML/test_reporter.py | 120 ++++++++++++++++++++++++++ scripts/lib/CIME/test_scheduler.py | 2 +- 3 files changed, 125 insertions(+), 6 deletions(-) create mode 100644 scripts/lib/CIME/XML/test_reporter.py diff --git a/scripts/Tools/testreporter.py b/scripts/Tools/testreporter.py index 74735fe56873..5ff6546f1875 100755 --- a/scripts/Tools/testreporter.py +++ b/scripts/Tools/testreporter.py @@ -11,7 +11,6 @@ import glob import xml.etree.ElementTree as ET from xml.dom import minidom -import HTMLParser import urllib import argparse @@ -55,8 +54,8 @@ for entry in child.iter('entry'): test_id = entry.get('id') value = entry.get('value') - if test_id == "MACH": - machine=value + if test_id == "MACH": + machine=value xml_file=glob.glob("*"+args.testid+"/env_test.xml") root = ET.parse(xml_file[0]).getroot() @@ -64,8 +63,8 @@ for entry in child.iter('entry'): test_id = entry.get('id') value = entry.get('value') - if test_id == "BASELINE_NAME_CMP": - baseline=value + if test_id == "BASELINE_NAME_CMP": + baseline=value # diff --git a/scripts/lib/CIME/XML/test_reporter.py b/scripts/lib/CIME/XML/test_reporter.py new file mode 100644 index 000000000000..edbdc6664957 --- /dev/null +++ b/scripts/lib/CIME/XML/test_reporter.py @@ -0,0 +1,120 @@ +""" +Interface to the archive.xml file. This class inherits from GenericXML.py + +""" + +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +from CIME.utils import expect,get_model + +import urllib + + + +class TestReporter(GenericXML): + + def __init__(self): + """ + initialize an object + """ + + expect(get_model() == 'cesm', "testreport is only meant to populate the CESM test database." ) + self.root = None + + GenericXML.__init__(self) + + def setup_header(self, tagname,machine,compiler,mpilib,testroot,testtype,baseline): + # + # Create the XML header that the testdb is expecting to recieve + # + tlelem = ET.Element("testrecord") + elem = ET.Element('tag_name') + elem.text = tagname + tlelem.append(elem) + elem = ET.Element('mach') + elem.text = machine + tlelem.append(elem) + elem = ET.Element('compiler',attrib={"version":""}) + elem.text = compiler + tlelem.append(elem) + elem = ET.Element('mpilib',attrib={"version":""}) + elem.text = mpilib + tlelem.append(elem) + elem = ET.Element('testroot') + elem.text = testroot + tlelem.append(elem) + elem = ET.Element('testtype') + elem.text = testtype + tlelem.append(elem) + elem = ET.Element('baselinetag') + elem.text = baseline + tlelem.append(elem) + + self.root=tlelem + + + + def dumpxml(self): + # + # Print testreport XML string to screen + # + GenericXML.write(self,outfile="TestRecord.xml") + + def add_result(self,test_name,test_status): + # + # Add a test result to the XML structure. 
+ # + tlelem = ET.Element('tests',attrib={"testname":test_name}) + elem=ET.Element('category',attrib={"name":"casestatus"}) + tlelem.append(elem) + elem=ET.Element('category',attrib={"name":"comment"}) + elem.text= test_status['COMMENT'] + tlelem.append(elem) + + elem=ET.Element('category',attrib={"name":"compare"}) + elem.text= test_status['BASELINE'] + tlelem.append(elem) + + elem=ET.Element('category',attrib={"name":"memcomp"}) + elem.text= test_status['MEMCOMP'] + tlelem.append(elem) + + elem=ET.Element('category',attrib={"name":"memleak"}) + elem.text= test_status['MEMLEAK'] + tlelem.append(elem) + + elem=ET.Element('category',attrib={"name":"nlcomp"}) + elem.text= test_status['NLCOMP'] + tlelem.append(elem) + + elem=ET.Element('category',attrib={"name":"status"}) + elem.text= test_status['STATUS'] + tlelem.append(elem) + + elem=ET.Element('category',attrib={"name":"tputcomp"}) + elem.text= test_status['TPUTCOMP'] + tlelem.append(elem) + + self.root.append(tlelem) + + + + def push2testdb(self): + # + # Post test result XML to CESM test database + # + xmlstr = ET.tostring(self.root,method="xml",encoding="UTF-8") + username=raw_input("Username:") + os.system("stty -echo") + password=raw_input("Password:") + os.system("stty echo") + params={'username':username,'password':password,'testXML':xmlstr} + url="https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" + params = urllib.urlencode(params) + f = urllib.urlopen(url, params) + # + # Print any messages from the post command + # + print f.read() + print f.code + diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index 9d4a1f24d2f1..deaf0f0a67c9 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -8,7 +8,7 @@ they can be run outside the context of TestScheduler. """ -import shutil, traceback, stat, threading, time, glob +import traceback, stat, threading, time, glob from CIME.XML.standard_module_setup import * import CIME.compare_namelists import CIME.utils From 55b660d915e7976983bad885fea331a1893dd7f9 Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Thu, 20 Apr 2017 14:03:16 -0600 Subject: [PATCH 113/219] Update python version of testreport to apply requested changes from the PR. Test suite: Test baseline: Test namelist changes: Test status: [bit for bit, roundoff, climate changing] Fixes [CIME Github issue #] User interface changes?: Code review: --- scripts/Tools/testreporter.py | 196 ++++++++++++++-------------------- 1 file changed, 80 insertions(+), 116 deletions(-) diff --git a/scripts/Tools/testreporter.py b/scripts/Tools/testreporter.py index 5ff6546f1875..4ca00fb8ffa5 100755 --- a/scripts/Tools/testreporter.py +++ b/scripts/Tools/testreporter.py @@ -2,17 +2,22 @@ """ -Simple script to populate CESM test database with test results. This can be -run with out any knowledge of CIME. +Simple script to populate CESM test database with test results. 
""" -import os +from standard_script_setup import * + +from CIME.XML.env_build import EnvBuild +from CIME.XML.env_case import EnvCase +from CIME.XML.env_test import EnvTest +from CIME.XML.test_reporter import TestReporter +from CIME.utils import expect + + import glob -import xml.etree.ElementTree as ET -from xml.dom import minidom -import urllib -import argparse + + @@ -37,48 +42,38 @@ os.chdir(args.testroot) +# +# Retrieve compiler name and mpi library +# xml_file=glob.glob("*"+args.testid+"/env_build.xml") -root = ET.parse(xml_file[0]).getroot() -for child in root.iter('group'): - for entry in child.iter('entry'): - test_id = entry.get('id') - value = entry.get('value') - if test_id == "COMPILER": - compiler=value - if test_id == "MPILIB": - mpilib = value +expect(len(xml_file) > 0, "Tests not found. It's possible your testid, %s is wrong." %args.testid ) +print len(xml_file) +envxml=(EnvBuild(".",infile=xml_file[0])) +compiler=envxml.get_value("COMPILER") +mpilib=envxml.get_value("MPILIB") +# +# Retrieve machine name +# xml_file=glob.glob("*"+args.testid+"/env_case.xml") -root = ET.parse(xml_file[0]).getroot() -for child in root.iter('group'): - for entry in child.iter('entry'): - test_id = entry.get('id') - value = entry.get('value') - if test_id == "MACH": - machine=value +envxml=(EnvCase(".",infile=xml_file[0])) +machine=envxml.get_value("MACH") +# +# Retrieve baseline tag to compare to +# xml_file=glob.glob("*"+args.testid+"/env_test.xml") -root = ET.parse(xml_file[0]).getroot() -for child in root.iter('group'): - for entry in child.iter('entry'): - test_id = entry.get('id') - value = entry.get('value') - if test_id == "BASELINE_NAME_CMP": - baseline=value - +envxml=(EnvTest(".",infile=xml_file[0])) +baseline = envxml.get_value("BASELINE_NAME_CMP") # -# Create XML +# Create XML header # -testrecord = ET.Element("testrecord") -tag_name = ET.SubElement(testrecord,'tag_name').text=args.tagname -mach = ET.SubElement(testrecord,'mach').text=machine -compiler = ET.SubElement(testrecord,'compiler',attrib={"version":""}).text=compiler -mpilib = ET.SubElement(testrecord,'mpilib',attrib={"version":""}).text=mpilib -testroot = ET.SubElement(testrecord,'testroot').text=args.testroot -testtype = ET.SubElement(testrecord,'testtype').text=args.testtype -baselinetag = ET.SubElement(testrecord,'baselinetag').text= baseline +testxml=TestReporter() +testrecord=None +testxml.setup_header(args.tagname,machine,compiler,mpilib,args.testroot,args.testtype,baseline) + # # Create lists on tests based on the testid in the testroot directory. # @@ -90,13 +85,13 @@ for test_name in test_names: if test_name == "cs.status."+args.testid: continue - test_status[test_name,'COMMENT']="" - test_status[test_name,'BASELINE']='----' - test_status[test_name,'MEMCOMP']='----' - test_status[test_name,'MEMLEAK']='----' - test_status[test_name,'NLCOMP']='----' - test_status[test_name,'STATUS']='----' - test_status[test_name,'TPUTCOMP']='----' + test_status['COMMENT']="" + test_status['BASELINE']='----' + test_status['MEMCOMP']='----' + test_status['MEMLEAK']='----' + test_status['NLCOMP']='----' + test_status['STATUS']='----' + test_status['TPUTCOMP']='----' # # Check to see if TestStatus is present, if not then continue # I might want to set the status to fail @@ -104,78 +99,78 @@ try: lines = [line.rstrip('\n') for line in open(test_name+"/TestStatus")] except: - test_status[test_name,'STATUS']="FAIL" - test_status[test_name,'COMMENT']="TestStatus missing. 
" + test_status['STATUS']="FAIL" + test_status['COMMENT']="TestStatus missing. " continue # # Loop over each line of TestStatus, and check for different types of failures. # for line in lines: if "NLCOMP" in line: - test_status[test_name,'NLCOMP']=line[0:4] + test_status['NLCOMP']=line[0:4] if "MEMLEAK" in line: - test_status[test_name,'MEMLEAK']=line[0:4] + test_status['MEMLEAK']=line[0:4] if "MEMCOMP" in line: - test_status[test_name,'MEMCOMP']=line[0:4] + test_status['MEMCOMP']=line[0:4] if "BASELINE" in line: - test_status[test_name,'BASELINE']=line[0:4] + test_status['BASELINE']=line[0:4] if "TPUTCOMP" in line: - test_status[test_name,'TPUTCOMP']=line[0:4] + test_status['TPUTCOMP']=line[0:4] if "INIT" in line: - test_status[test_name,'INIT']=line[0:4] + test_status['INIT']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="SFAIL" - test_status[test_name,'COMMENT']+="INIT fail! " + test_status['STATUS']="SFAIL" + test_status['COMMENT']+="INIT fail! " break if "CREATE_NEWCASE" in line: - test_status[test_name,'CREATE_NEWCASE']=line[0:4] + test_status['CREATE_NEWCASE']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="SFAIL" - test_status[test_name,'COMMENT']+="CREATE_NEWCASE fail! " + test_status['STATUS']="SFAIL" + test_status['COMMENT']+="CREATE_NEWCASE fail! " break if "XML" in line: - test_status[test_name,'XML']=line[0:4] + test_status['XML']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="SFAIL" - test_status[test_name,'COMMENT']+="XML fail! " + test_status['STATUS']="SFAIL" + test_status['COMMENT']+="XML fail! " break if "SETUP" in line: - test_status[test_name,'SETUP']=line[0:4] + test_status['SETUP']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="SFAIL" - test_status[test_name,'COMMENT']+="SETUP fail! " + test_status['STATUS']="SFAIL" + test_status['COMMENT']+="SETUP fail! " break if "SHAREDLIB_BUILD" in line: - test_status[test_name,'SHAREDLIB_BUILD']=line[0:4] + test_status['SHAREDLIB_BUILD']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="CFAIL" - test_status[test_name,'COMMENT']+="SHAREDLIB_BUILD fail! " + test_status['STATUS']="CFAIL" + test_status['COMMENT']+="SHAREDLIB_BUILD fail! " break if "MODEL_BUILD" in line: - test_status[test_name,'MODEL_BUILD']=line[0:4] + test_status['MODEL_BUILD']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'STATUS']="CFAIL" - test_status[test_name,'COMMENT']+="MODEL_BUILD fail! " + test_status['STATUS']="CFAIL" + test_status['COMMENT']+="MODEL_BUILD fail! " break if "SUBMIT" in line: - test_status[test_name,'STATUS']=line[0:4] + test_status['STATUS']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'COMMENT']+="SUBMIT fail! " + test_status['COMMENT']+="SUBMIT fail! " break if "RUN" in line: - test_status[test_name,'STATUS']=line[0:4] + test_status['STATUS']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'COMMENT']+="RUN fail! " + test_status['COMMENT']+="RUN fail! " break if "COMPARE_base_rest" in line: - test_status[test_name,'STATUS']=line[0:4] + test_status['STATUS']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'COMMENT']+="Restart fail! " + test_status['COMMENT']+="Restart fail! " break if "COMPARE_base_hybrid" in line: - test_status[test_name,'STATUS']=line[0:4] + test_status['STATUS']=line[0:4] if line[0:4] == "FAIL": - test_status[test_name,'COMMENT']+="Hybrid fail! " + test_status['COMMENT']+="Hybrid fail! 
" break # @@ -183,57 +178,26 @@ # try: if 'time=' not in line: - test_status[test_name,'COMMENT']+=line.split(' ',3)[3]+' ' + test_status['COMMENT']+=line.split(' ',3)[3]+' ' except: pass # - # File in the xml with the test results + # Fill in the xml with the test results # - tests = ET.Element('tests',attrib={"testname":test_name}) - testrecord.append(tests) - category=ET.SubElement(tests,'category',attrib={"name":"casestatus"}) - category=ET.SubElement(tests,'category',attrib={"name":"comment"}).text= test_status[test_name,'COMMENT'] - category=ET.SubElement(tests,'category',attrib={"name":"compare"}).text= test_status[test_name,'BASELINE'] - category=ET.SubElement(tests,'category',attrib={"name":"memcomp"}).text= test_status[test_name,'MEMCOMP'] - category=ET.SubElement(tests,'category',attrib={"name":"memleak"}).text=test_status[test_name,'MEMLEAK'] - category=ET.SubElement(tests,'category',attrib={"name":"nlcomp"}).text= test_status[test_name,'NLCOMP'] - category=ET.SubElement(tests,'category',attrib={"name":"status"}).text= test_status[test_name,'STATUS'] - category=ET.SubElement(tests,'category',attrib={"name":"tputcomp"}).text= test_status[test_name,'TPUTCOMP'] + testxml.add_result(test_name,test_status) -# -# Convert XML to a string -# -xmlstr = ET.tostring(testrecord,method="xml",encoding="UTF-8") - -# -# Make the XML string human readable and print it out -# -xml=minidom.parseString(xmlstr) -testXML = xml.toprettyxml(encoding="UTF-8") # # Dump xml to the screen. # if args.dumpxml: - print testXML + testxml.dumpxml() # # Prompt for username and password, then post the XML string to the test database website # if not args.dryrun: - username=raw_input("Username:") - os.system("stty -echo") - password=raw_input("Password:") - os.system("stty echo") - params={'username':username,'password':password,'testXML':testXML} - url="https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" - params = urllib.urlencode(params) - f = urllib.urlopen(url, params) - # - # Print any messages from the post command - # - print f.read() - print f.code + testxml.push2testdb() From 08f53ec59309b47d850febbba04715f9ed317523 Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Fri, 21 Apr 2017 11:03:24 -0600 Subject: [PATCH 114/219] Add testreporter.template. Fix comment in test_reporter.py Test suite: Test baseline: Test namelist changes: Test status: [bit for bit, roundoff, climate changing] Fixes [CIME Github issue #] User interface changes?: Code review: --- scripts/lib/CIME/XML/test_reporter.py | 2 +- scripts/lib/testreporter.template | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 scripts/lib/testreporter.template diff --git a/scripts/lib/CIME/XML/test_reporter.py b/scripts/lib/CIME/XML/test_reporter.py index edbdc6664957..5b950557b9e5 100644 --- a/scripts/lib/CIME/XML/test_reporter.py +++ b/scripts/lib/CIME/XML/test_reporter.py @@ -1,5 +1,5 @@ """ -Interface to the archive.xml file. This class inherits from GenericXML.py +Interface to the testreporter xml. This class inherits from GenericXML.py """ diff --git a/scripts/lib/testreporter.template b/scripts/lib/testreporter.template new file mode 100644 index 000000000000..c8bfa79405a2 --- /dev/null +++ b/scripts/lib/testreporter.template @@ -0,0 +1,3 @@ +#! 
/bin/bash + +/testreporter.py "$@" From 1e851c266e77d03c2b3a32e3856be6777877fec6 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Fri, 21 Apr 2017 11:55:50 -0600 Subject: [PATCH 115/219] Added '+=' seperator to string literal parser When checking for string literals I forgot to add '+=' to separators. Additionally, it was necessary to keep track of added values when parsing. --- scripts/lib/CIME/namelist.py | 100 ++++++++++++++++++++++++++--------- 1 file changed, 75 insertions(+), 25 deletions(-) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index 2f4366a39f89..52a25b44fc8f 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -1666,6 +1666,29 @@ def _look_ahead_for_equals(self, pos): break return False + def _look_ahead_for_plusequals(self, pos): + r"""Look ahead to see if the next whitespace character is '='. + + The `pos` argument is the position in the text to start from while + looking. This function returns a boolean. + + >>> _NamelistParser('+=')._look_ahead_for_plusequals(0) + True + >>> _NamelistParser('a \n+=')._look_ahead_for_plusequals(1) + True + >>> _NamelistParser('')._look_ahead_for_plusequals(0) + False + >>> _NamelistParser('a+=')._look_ahead_for_plusequals(0) + False + """ + for test_pos in range(pos, self._len): + if self._text[test_pos] not in (' ', '\n'): + if self._text[test_pos] == '+': + return self._look_ahead_for_equals(test_pos + 1) + else: + break + return False + def _parse_literal(self, allow_name=False, allow_eof_end=False): r"""Parse and return a variable value at the current position. @@ -1741,16 +1764,28 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): Traceback (most recent call last): ... _NamelistParseError: Error in parsing namelist: expected literal value, but got 'foo=' + >>> _NamelistParser('foo+= ')._parse_literal() + Traceback (most recent call last): + ... + _NamelistParseError: Error in parsing namelist: expected literal value, but got 'foo+=' >>> _NamelistParser('5,')._parse_literal(allow_name=True) u'5' >>> x = _NamelistParser('foo= ') >>> x._parse_literal(allow_name=True) >>> x._curr() u'f' + >>> x = _NamelistParser('foo+= ') + >>> x._parse_literal(allow_name=True) + >>> x._curr() + u'f' >>> _NamelistParser('6*foo= ')._parse_literal(allow_name=True) Traceback (most recent call last): ... _NamelistParseError: Error in parsing namelist: expected literal value, but got '6*foo=' + >>> _NamelistParser('6*foo+= ')._parse_literal(allow_name=True) + Traceback (most recent call last): + ... + _NamelistParseError: Error in parsing namelist: expected literal value, but got '6*foo+=' >>> x = _NamelistParser('foo = ') >>> x._parse_literal(allow_name=True) >>> x._curr() @@ -1789,6 +1824,7 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): separators = [' ', '\n', ',', '/'] if allow_name: separators.append('=') + separators.append('+=') while new_pos != self._len and self._text[new_pos] not in separators: # allow commas if they are inside () if self._text[new_pos] == '(': @@ -1801,9 +1837,12 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): # At the end of the file, give up by throwing an EOF. self._advance(self._len) # If `allow_name` is set, we need to check and see if the next non-blank - # character is '=', and return `None` if so. + # character is '=' or the next two are '+=', and return `None` if so. 
if allow_name and self._look_ahead_for_equals(new_pos): return + elif allow_name and self._look_ahead_for_plusequals(new_pos): + return + self._advance(new_pos - self._pos, check_eof=allow_eof_end) text = self._text[old_pos:self._pos] if not any(is_valid_fortran_namelist_literal(type_, text) @@ -1926,60 +1965,62 @@ def _parse_name_and_values(self, allow_eof_end=False): alternate file format in "groupless" mode.) >>> _NamelistParser("foo='bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"]) + (u'foo', [u"'bar'"], False) >>> _NamelistParser("foo(3)='bar' /")._parse_name_and_values() - (u'foo(3)', [u"'bar'"]) + (u'foo(3)', [u"'bar'"], False) >>> _NamelistParser("foo ='bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"]) + (u'foo', [u"'bar'"], False) >>> _NamelistParser("foo=\n'bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"]) + (u'foo', [u"'bar'"], False) >>> _NamelistParser("foo 'bar' /")._parse_name_and_values() Traceback (most recent call last): ... _NamelistParseError: Error in parsing namelist: expected '=' but found "'" >>> _NamelistParser("foo='bar','bazz' /")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"]) + (u'foo', [u"'bar'", u"'bazz'"], False) >>> _NamelistParser("foo=,,'bazz',6*/")._parse_name_and_values() - (u'foo', [u'', u'', u"'bazz'", u'6*']) + (u'foo', [u'', u'', u"'bazz'", u'6*'], False) >>> _NamelistParser("foo='bar' 'bazz' foo2='ban'")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"]) + (u'foo', [u"'bar'", u"'bazz'"], False) >>> _NamelistParser("foo='bar' 'bazz' foo2(2)='ban'")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"]) + (u'foo', [u"'bar'", u"'bazz'"], False) >>> _NamelistParser("foo= foo2='ban' ")._parse_name_and_values() Traceback (most recent call last): ... _NamelistParseError: Error in parsing namelist: expected literal value, but got "foo2='ban'" >>> _NamelistParser("foo=,,'bazz',6* ")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'', u'', u"'bazz'", u'6*']) + (u'foo', [u'', u'', u"'bazz'", u'6*'], False) >>> _NamelistParser("foo(3)='bazz'")._parse_name_and_values(allow_eof_end=True) - (u'foo(3)', [u"'bazz'"]) + (u'foo(3)', [u"'bazz'"], False) >>> _NamelistParser("foo=")._parse_name_and_values() Traceback (most recent call last): ... _NamelistEOF: Unexpected end of file encountered in namelist. >>> _NamelistParser("foo=")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'']) + (u'foo', [u''], False) >>> _NamelistParser("foo= ")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'']) + (u'foo', [u''], False) >>> _NamelistParser("foo=2")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'2']) + (u'foo', [u'2'], False) >>> _NamelistParser("foo=1,2")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1', u'2']) + (u'foo', [u'1', u'2'], False) >>> _NamelistParser("foo(1:2)=1,2,3 ")._parse_name_and_values(allow_eof_end=True) Traceback (most recent call last): ... 
SystemExit: ERROR: Too many values for array foo(1:2) >>> _NamelistParser("foo=1,")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1', u'']) + (u'foo', [u'1', u''], False) >>> _NamelistParser("foo+=1")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1']) + (u'foo', [u'1'], True) """ name = self._parse_variable_name() + addto = False # This keeps track of whether += existed self._eat_whitespace() # check to see if we have a "+=" if self._expect_char("+", RETURN=True): self._advance() + addto=True # tell parser that we want to add to dictionary values self._expect_char("=") try: self._advance() @@ -1987,7 +2028,7 @@ def _parse_name_and_values(self, allow_eof_end=False): except _NamelistEOF: # If we hit the end of file, return a name assigned to a null value. if allow_eof_end: - return name, [u''] + return name, [u''], addto else: raise # Expect at least one literal, even if it's a null value. @@ -2006,7 +2047,7 @@ def _parse_name_and_values(self, allow_eof_end=False): arraylen =max(0,1 + ((maxindex - minindex)/step)) expect(len(values) <= arraylen, "Too many values for array %s"%(name)) - return name, values + return name, values, addto def _parse_namelist_group(self): r"""Parse an entire namelist group, adding info to `self._settings`. @@ -2059,12 +2100,13 @@ def _parse_namelist_group(self): self._settings[group_name] = {} self._eat_whitespace() while self._curr() != '/': - name, values = self._parse_name_and_values() + name, values, addto = self._parse_name_and_values() dsettings = [] if self._groupless: - if name in self._settings: - dsettings = self._settings[name] - values = merge_literal_lists(dsettings, values) + if name in self._settings and not addto: + values = merge_literal_lists(self._settings[name], values) + elif name in self._settings and addto: + values = self._settings[name] + values self._settings[name] = values else: group = self._settings[group_name] @@ -2107,6 +2149,12 @@ def parse_namelist(self): OrderedDict([(u'foo(2)', [u"'bar'"])]) >>> _NamelistParser("foo(2)='bar', foo(3)='bazz'", groupless=True).parse_namelist() OrderedDict([(u'foo(2)', [u"'bar'"]), (u'foo(3)', [u"'bazz'"])]) + >>> _NamelistParser("foo='bar', foo='bazz'", groupless=True).parse_namelist() + OrderedDict([(u'foo', [u"'bazz'"])]) + >>> _NamelistParser("foo='bar'\n foo+='bazz'", groupless=True).parse_namelist() + OrderedDict([(u'foo', [u"'bar'", u"'bazz'"])]) + >>> _NamelistParser("foo='bar', 'bazz'\n foo+='ban'", groupless=True).parse_namelist() + OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u"'ban'"])]) """ # Return empty dictionary for empty files. if self._len == 0: @@ -2120,9 +2168,11 @@ def parse_namelist(self): # Handle case with no namelist groups. if self._groupless and self._curr() != '&': while self._pos < self._len: - name, values = self._parse_name_and_values(allow_eof_end=True) - if name in self._settings: + name, values, addto = self._parse_name_and_values(allow_eof_end=True) + if name in self._settings and not addto: values = merge_literal_lists(self._settings[name], values) + elif name in self._settings and addto: + values = self._settings[name] + values self._settings[name] = values return self._settings # Loop over namelist groups in the file. From 34db2f2da5d41b6c0997333c1f5a0c9b47a879ef Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Fri, 21 Apr 2017 15:48:00 -0600 Subject: [PATCH 116/219] More changes for testreport from code review. 
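Before the testreporter changes in this patch, it is worth spelling out what the `+=` support from PATCH 115 above amounts to once `_parse_name_and_values` hands back its (name, values, addto) triple. The sketch below only illustrates that merge rule, under the simplifying assumption that a plain reassignment replaces the stored list (the real override branch goes through merge_literal_lists); the helper name apply_assignment is invented here and is not part of CIME.

    from collections import OrderedDict

    def apply_assignment(settings, name, values, addto):
        # '+=' appends the new literals to whatever is already stored;
        # '=' overrides (simplified; CIME's override path uses merge_literal_lists).
        if name in settings and addto:
            settings[name] = settings[name] + values
        else:
            settings[name] = values

    settings = OrderedDict()
    apply_assignment(settings, 'foo', ["'bar'"], addto=False)    # foo='bar'
    apply_assignment(settings, 'foo', ["'bazz'"], addto=True)    # foo+='bazz'
    print(settings)   # OrderedDict([('foo', ["'bar'", "'bazz'"])])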
Test suite: Test baseline: Test namelist changes: Test status: [bit for bit, roundoff, climate changing] Fixes [CIME Github issue #] User interface changes?: Code review: --- scripts/Tools/testreporter.py | 347 ++++++++++++++------------ scripts/lib/CIME/XML/test_reporter.py | 7 - 2 files changed, 186 insertions(+), 168 deletions(-) diff --git a/scripts/Tools/testreporter.py b/scripts/Tools/testreporter.py index 4ca00fb8ffa5..4ea6666dc244 100755 --- a/scripts/Tools/testreporter.py +++ b/scripts/Tools/testreporter.py @@ -13,191 +13,216 @@ from CIME.XML.env_test import EnvTest from CIME.XML.test_reporter import TestReporter from CIME.utils import expect +from CIME.XML.generic_xml import GenericXML import glob +############################################################################### +def parse_command_line(args): +############################################################################### + parser = argparse.ArgumentParser() + CIME.utils.setup_standard_logging_options(parser) -# Parse command line options + # Parse command line options -parser = argparse.ArgumentParser(description='Arguements for testreporter') -parser.add_argument("--tagname", +# parser = argparse.ArgumentParser(description='Arguements for testreporter') + parser.add_argument("--tagname", help="Name of the tag being tested.") -parser.add_argument("--testid", - help="Test id, ie c2_0_a6g,c2_0_b6g.") -parser.add_argument("--testroot", + parser.add_argument("--testid", + help="Test id, ie c2_0_a6g_ing,c2_0_b6g_gnu.") + parser.add_argument("--testroot", help="Root directory for tests to populate the database.") -parser.add_argument("--testtype", + parser.add_argument("--testtype", help="Type of test, prealpha or prebeta.") -parser.add_argument("--dryrun",action="store_true", + parser.add_argument("--dryrun",action="store_true", help="Do a dry run, database will not be populated.") -parser.add_argument("--dumpxml",action="store_true", + parser.add_argument("--dumpxml",action="store_true", help="Dump XML test results to sceen.") -args = parser.parse_args() - -# Fill in values needed from xml files (COMPILER, MPILIB, MACH, BASELINE_NAME_CMP. - -os.chdir(args.testroot) - -# -# Retrieve compiler name and mpi library -# -xml_file=glob.glob("*"+args.testid+"/env_build.xml") -expect(len(xml_file) > 0, "Tests not found. It's possible your testid, %s is wrong." %args.testid ) -print len(xml_file) -envxml=(EnvBuild(".",infile=xml_file[0])) -compiler=envxml.get_value("COMPILER") -mpilib=envxml.get_value("MPILIB") - -# -# Retrieve machine name -# -xml_file=glob.glob("*"+args.testid+"/env_case.xml") -envxml=(EnvCase(".",infile=xml_file[0])) -machine=envxml.get_value("MACH") - -# -# Retrieve baseline tag to compare to -# -xml_file=glob.glob("*"+args.testid+"/env_test.xml") -envxml=(EnvTest(".",infile=xml_file[0])) -baseline = envxml.get_value("BASELINE_NAME_CMP") - -# -# Create XML header -# - -testxml=TestReporter() -testrecord=None -testxml.setup_header(args.tagname,machine,compiler,mpilib,args.testroot,args.testtype,baseline) - -# -# Create lists on tests based on the testid in the testroot directory. 
-# -test_names=glob.glob("*"+args.testid) -# -# Loop over all tests and parse the test results -# -test_status={} -for test_name in test_names: - if test_name == "cs.status."+args.testid: - continue - test_status['COMMENT']="" - test_status['BASELINE']='----' - test_status['MEMCOMP']='----' - test_status['MEMLEAK']='----' - test_status['NLCOMP']='----' - test_status['STATUS']='----' - test_status['TPUTCOMP']='----' - # - # Check to see if TestStatus is present, if not then continue - # I might want to set the status to fail - # - try: - lines = [line.rstrip('\n') for line in open(test_name+"/TestStatus")] - except: - test_status['STATUS']="FAIL" - test_status['COMMENT']="TestStatus missing. " - continue - # - # Loop over each line of TestStatus, and check for different types of failures. - # - for line in lines: - if "NLCOMP" in line: - test_status['NLCOMP']=line[0:4] - if "MEMLEAK" in line: - test_status['MEMLEAK']=line[0:4] - if "MEMCOMP" in line: - test_status['MEMCOMP']=line[0:4] - if "BASELINE" in line: - test_status['BASELINE']=line[0:4] - if "TPUTCOMP" in line: - test_status['TPUTCOMP']=line[0:4] - if "INIT" in line: - test_status['INIT']=line[0:4] - if line[0:4] == "FAIL": - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="INIT fail! " - break - if "CREATE_NEWCASE" in line: - test_status['CREATE_NEWCASE']=line[0:4] - if line[0:4] == "FAIL": - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="CREATE_NEWCASE fail! " - break - if "XML" in line: - test_status['XML']=line[0:4] - if line[0:4] == "FAIL": - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="XML fail! " - break - if "SETUP" in line: - test_status['SETUP']=line[0:4] - if line[0:4] == "FAIL": - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="SETUP fail! " - break - if "SHAREDLIB_BUILD" in line: - test_status['SHAREDLIB_BUILD']=line[0:4] - if line[0:4] == "FAIL": - test_status['STATUS']="CFAIL" - test_status['COMMENT']+="SHAREDLIB_BUILD fail! " - break - if "MODEL_BUILD" in line: - test_status['MODEL_BUILD']=line[0:4] - if line[0:4] == "FAIL": - test_status['STATUS']="CFAIL" - test_status['COMMENT']+="MODEL_BUILD fail! " - break - if "SUBMIT" in line: - test_status['STATUS']=line[0:4] - if line[0:4] == "FAIL": - test_status['COMMENT']+="SUBMIT fail! " - break - if "RUN" in line: - test_status['STATUS']=line[0:4] - if line[0:4] == "FAIL": - test_status['COMMENT']+="RUN fail! " - break - if "COMPARE_base_rest" in line: - test_status['STATUS']=line[0:4] - if line[0:4] == "FAIL": - test_status['COMMENT']+="Restart fail! " - break - if "COMPARE_base_hybrid" in line: - test_status['STATUS']=line[0:4] - if line[0:4] == "FAIL": - test_status['COMMENT']+="Hybrid fail! " - break + args = parser.parse_args() + CIME.utils.handle_standard_logging_options(args) + + return args.testroot, args.testid, args.tagname, args.testtype, args.dryrun, args.dumpxml + +############################################################################### +def get_testreporter_xml(testroot, testid, tagname, testtype): +############################################################################### + os.chdir(testroot) + + # + # Retrieve compiler name and mpi library + # + xml_file=glob.glob("*"+testid+"/env_build.xml") + expect(len(xml_file) > 0, "Tests not found. It's possible your testid, %s is wrong." 
%testid ) + envxml=(EnvBuild(".",infile=xml_file[0])) + compiler=envxml.get_value("COMPILER") + mpilib=envxml.get_value("MPILIB") + + # + # Retrieve machine name + # + xml_file=glob.glob("*"+testid+"/env_case.xml") + envxml=(EnvCase(".",infile=xml_file[0])) + machine=envxml.get_value("MACH") + + # + # Retrieve baseline tag to compare to + # + xml_file=glob.glob("*"+testid+"/env_test.xml") + envxml=(EnvTest(".",infile=xml_file[0])) + baseline = envxml.get_value("BASELINE_NAME_CMP") + + # + # Create XML header + # + + testxml=TestReporter() + testxml.setup_header(tagname,machine,compiler,mpilib,testroot,testtype,baseline) + + # + # Create lists on tests based on the testid in the testroot directory. + # + test_names=glob.glob("*"+testid) + # + # Loop over all tests and parse the test results + # + test_status={} + for test_name in test_names: + if not os.path.isfile(test_name+"/TestStatus"): + continue + test_status['COMMENT']="" + test_status['BASELINE']='----' + test_status['MEMCOMP']='----' + test_status['MEMLEAK']='----' + test_status['NLCOMP']='----' + test_status['STATUS']='----' + test_status['TPUTCOMP']='----' # - # Do not include time comments. Just a preference to have cleaner comments in the test database + # Check to see if TestStatus is present, if not then continue + # I might want to set the status to fail # try: - if 'time=' not in line: - test_status['COMMENT']+=line.split(' ',3)[3]+' ' + lines = [line.rstrip('\n') for line in open(test_name+"/TestStatus")] except: - pass + test_status['STATUS']="FAIL" + test_status['COMMENT']="TestStatus missing. " + continue + # + # Loop over each line of TestStatus, and check for different types of failures. + # + for line in lines: + if "NLCOMP" in line: + test_status['NLCOMP']=line[0:4] + if "MEMLEAK" in line: + test_status['MEMLEAK']=line[0:4] + if "MEMCOMP" in line: + test_status['MEMCOMP']=line[0:4] + if "BASELINE" in line: + test_status['BASELINE']=line[0:4] + if "TPUTCOMP" in line: + test_status['TPUTCOMP']=line[0:4] + if "INIT" in line: + test_status['INIT']=line[0:4] + if line[0:4] == "FAIL": + test_status['STATUS']="SFAIL" + test_status['COMMENT']+="INIT fail! " + break + if "CREATE_NEWCASE" in line: + test_status['CREATE_NEWCASE']=line[0:4] + if line[0:4] == "FAIL": + test_status['STATUS']="SFAIL" + test_status['COMMENT']+="CREATE_NEWCASE fail! " + break + if "XML" in line: + test_status['XML']=line[0:4] + if line[0:4] == "FAIL": + test_status['STATUS']="SFAIL" + test_status['COMMENT']+="XML fail! " + break + if "SETUP" in line: + test_status['SETUP']=line[0:4] + if line[0:4] == "FAIL": + test_status['STATUS']="SFAIL" + test_status['COMMENT']+="SETUP fail! " + break + if "SHAREDLIB_BUILD" in line: + test_status['SHAREDLIB_BUILD']=line[0:4] + if line[0:4] == "FAIL": + test_status['STATUS']="CFAIL" + test_status['COMMENT']+="SHAREDLIB_BUILD fail! " + break + if "MODEL_BUILD" in line: + test_status['MODEL_BUILD']=line[0:4] + if line[0:4] == "FAIL": + test_status['STATUS']="CFAIL" + test_status['COMMENT']+="MODEL_BUILD fail! " + break + if "SUBMIT" in line: + test_status['STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status['COMMENT']+="SUBMIT fail! " + break + if "RUN" in line: + test_status['STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status['COMMENT']+="RUN fail! " + break + if "COMPARE_base_rest" in line: + test_status['STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status['COMMENT']+="Restart fail! 
" + break + if "COMPARE_base_hybrid" in line: + test_status['STATUS']=line[0:4] + if line[0:4] == "FAIL": + test_status['COMMENT']+="Hybrid fail! " + break + + # + # Do not include time comments. Just a preference to have cleaner comments in the test database + # + try: + if 'time=' not in line: + test_status['COMMENT']+=line.split(' ',3)[3]+' ' + except: + pass + + # + # Fill in the xml with the test results + # + testxml.add_result(test_name,test_status) + + return testxml + + +############################################################################## +def _main_func(): +############################################################################### + + testroot, testid, tagname, testtype, dryrun, dumpxml = parse_command_line(sys.argv) + + testxml = get_testreporter_xml(testroot, testid, tagname, testtype) # - # Fill in the xml with the test results + # Dump xml to a file. # - testxml.add_result(test_name,test_status) + if dumpxml: + GenericXML.write(testxml,outfile="TestRecord.xml") + + # + # Prompt for username and password, then post the XML string to the test database website + # + if not dryrun: + testxml.push2testdb() -# -# Dump xml to the screen. -# -if args.dumpxml: - testxml.dumpxml() +############################################################################### -# -# Prompt for username and password, then post the XML string to the test database website -# -if not args.dryrun: - testxml.push2testdb() +if __name__ == "__main__": + _main_func() diff --git a/scripts/lib/CIME/XML/test_reporter.py b/scripts/lib/CIME/XML/test_reporter.py index 5b950557b9e5..261a13a9d14f 100644 --- a/scripts/lib/CIME/XML/test_reporter.py +++ b/scripts/lib/CIME/XML/test_reporter.py @@ -52,13 +52,6 @@ def setup_header(self, tagname,machine,compiler,mpilib,testroot,testtype,baselin self.root=tlelem - - - def dumpxml(self): - # - # Print testreport XML string to screen - # - GenericXML.write(self,outfile="TestRecord.xml") def add_result(self,test_name,test_status): # From 5472351309915ae710092943b49a5cfeca34b169 Mon Sep 17 00:00:00 2001 From: Francis Vitt Date: Fri, 21 Apr 2017 16:39:55 -0600 Subject: [PATCH 117/219] Use ESMF7 on yellowstone and cheyenne modified: config/cesm/machines/config_compilers.xml modified: config/cesm/machines/config_machines.xml --- config/cesm/machines/config_compilers.xml | 2 ++ config/cesm/machines/config_machines.xml | 25 +++++++++++++++++++---- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 3a5a0d0471b8..02d5eda32278 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -995,6 +995,8 @@ using a fortran linker. 
$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP FALSE + /glade/u/home/fvitt/esmf_7_0_0_intel17/esmf/lib/libO/Linux.intel.64.mpi.default + /glade/u/home/fvitt/esmf_7_0_0_intel17/esmf/lib/libg/Linux.intel.64.mpi.default diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index 6b25f04610ef..6350eaf8da4b 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -251,6 +251,23 @@ intel/17.0.1 mkl + gnu/6.3.0 @@ -1694,16 +1711,16 @@ esmf - esmf-6.3.0rp1-defio-mpi-g + esmf-7.0.0-ncdfio-mpi-g - esmf-6.3.0rp1-defio-mpi-O + esmf-7.0.0-defio-mpi-O - esmf-6.3.0rp1-ncdfio-uni-g + esmf-7.0.0-ncdfio-uni-g - esmf-6.3.0rp1-ncdfio-uni-O + esmf-7.0.0-ncdfio-uni-O pgi/15.10 From 2d0fbe53f1fb2fa02072dc022c0e8ab32fd8367d Mon Sep 17 00:00:00 2001 From: James Foucar Date: Sun, 23 Apr 2017 16:00:28 -0600 Subject: [PATCH 118/219] Add input-dir to create_newcase --- scripts/create_newcase | 13 ++++++++++--- scripts/lib/CIME/case.py | 6 +++++- scripts/lib/CIME/test_scheduler.py | 5 ++--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/scripts/create_newcase b/scripts/create_newcase index a28f8c8d1292..1e094c3a7a22 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -118,6 +118,9 @@ OR help="Pre-answer what to do pre-existing bld/exe dirs. Valid answers are (a)bort (r)eplace or (u)se existing." "This can be useful if you need to run create_newcase non-iteractively.") + parser.add_argument("-i", "--input-dir", + help="Use a non-default location for input files") + args = parser.parse_args() CIME.utils.handle_standard_logging_options(args) @@ -150,11 +153,14 @@ OR expect(CIME.utils.check_name(args.case, fullpath=True), "Illegal case name argument provided") + if args.input_dir is not None: + args.input_dir = os.path.abspath(args.input_dir) + return args.case, args.compset, args.res, args.machine, args.compiler,\ args.mpilib, args.project, args.pecount, \ args.user_mods_dir, args.user_compset, args.pesfile, \ args.user_grid, args.gridfile, args.srcroot, args.test, args.ninst, \ - args.walltime, args.queue, args.output_root, run_unsupported, args.answer + args.walltime, args.queue, args.output_root, run_unsupported, args.answer, args.input_dir ############################################################################### def _main_func(description): @@ -165,7 +171,7 @@ def _main_func(description): mpilib, project, pecount, \ user_mods_dir, user_compset, pesfile, \ user_grid, gridfile, srcroot, test, ninst, walltime, queue, \ - output_root, run_unsupported, answer = parse_command_line(sys.argv, cimeroot, description) + output_root, run_unsupported, answer, input_dir = parse_command_line(sys.argv, cimeroot, description) caseroot = os.path.abspath(caseroot) @@ -186,7 +192,8 @@ def _main_func(description): user_compset=user_compset, pesfile=pesfile, user_grid=user_grid, gridfile=gridfile, ninst=ninst, test=test, walltime=walltime, queue=queue, output_root=output_root, - run_unsupported=run_unsupported, answer=answer) + run_unsupported=run_unsupported, answer=answer, + input_dir=input_dir) case.create_caseroot() diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index a427ef4fb9b3..7a5e53466e13 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -552,7 +552,8 @@ def configure(self, compset_name, grid_name, machine_name=None, project=None, pecount=None, compiler=None, mpilib=None, user_compset=False, pesfile=None, user_grid=False, gridfile=None, 
ninst=1, test=False, - walltime=None, queue=None, output_root=None, run_unsupported=False, answer=None): + walltime=None, queue=None, output_root=None, run_unsupported=False, answer=None, + input_dir=None): #-------------------------------------------- # compset, pesfile, and compset components @@ -821,6 +822,9 @@ def configure(self, compset_name, grid_name, machine_name=None, # Set TOTAL_CORES self.set_value("TOTAL_CORES", self.total_tasks * self.cores_per_task ) + if input_dir is not None: + self.set_value("DIN_LOC_ROOT", os.path.abspath(input_dir)) + def get_compset_var_settings(self): compset_obj = Compsets(infile=self.get_value("COMPSETS_SPEC_FILE")) matches = compset_obj.get_compset_var_settings(self._compsetname, self._gridname) diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index dd97d93d301b..83a9933599e2 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -365,6 +365,8 @@ def _create_newcase_phase(self, test): create_newcase_cmd += " --project %s " % self._project if self._output_root is not None: create_newcase_cmd += " --output-root %s " % self._output_root + if self._input_dir is not None: + create_newcase_cmd += " --input-dir %s " % self._input_dir if test_mods is not None: files = Files() @@ -537,9 +539,6 @@ def _xml_phase(self, test): case.set_value("TEST", True) case.set_value("SAVE_TIMING", self._save_timing) - if self._input_dir is not None: - case.set_value("DIN_LOC_ROOT", self._input_dir) - return True, "" ########################################################################### From 9a1d1eba504bed8e4b4261845028a7ab828da179 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Sun, 23 Apr 2017 16:38:51 -0600 Subject: [PATCH 119/219] Print time built per model --- scripts/lib/CIME/build.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/lib/CIME/build.py b/scripts/lib/CIME/build.py index 74b34c2ef0f0..111c629f742a 100644 --- a/scripts/lib/CIME/build.py +++ b/scripts/lib/CIME/build.py @@ -301,6 +301,7 @@ def _build_model_thread(config_dir, compclass, caseroot, libroot, bldroot, incro thread_bad_results, smp, compiler): ############################################################################### logger.info("Building %s with output to %s"%(compclass, file_build)) + t1 = time.time() with open(file_build, "w") as fd: stat = run_cmd("MODEL=%s SMP=%s %s/buildlib %s %s %s " % (compclass, stringify_bool(smp), config_dir, caseroot, libroot, bldroot), @@ -313,6 +314,8 @@ def _build_model_thread(config_dir, compclass, caseroot, libroot, bldroot, incro for mod_file in glob.glob(os.path.join(bldroot, "*_[Cc][Oo][Mm][Pp]_*.mod")): shutil.copy(mod_file, incroot) + t2 = time.time() + logger.info("%s built in %f seconds" % (compclass, (t2 - t1))) ############################################################################### def _clean_impl(case, cleanlist, clean_all): From bbd69553d38ca9e90a5026bc0784d16afa29451b Mon Sep 17 00:00:00 2001 From: James Foucar Date: Sun, 23 Apr 2017 17:03:13 -0600 Subject: [PATCH 120/219] Minor changes to xmlchange Add test for append. Xmlchange append was acting like prepend. 
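The fix in the diff below is just a swap in the order of the string interpolation, but since the whole bug was getting that order backwards, here is a minimal stand-alone sketch of the intended behaviour. The helper name combine and its append flag are illustrative only; the real code operates directly on xmlval and the value already stored in the XML file.

    def combine(existing, new, append=True):
        # --append should keep the existing value first and tack the new
        # token on at the end; the pre-fix code produced the reverse order.
        if append:
            return "%s %s" % (existing, new)
        return "%s %s" % (new, existing)

    print(combine("-opt1", "-opt2"))                  # -opt1 -opt2   (fixed --append result)
    print(combine("-opt1", "-opt2", append=False))    # -opt2 -opt1   (old, prepend-like result)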
--- scripts/Tools/xmlchange | 14 +++++++------- scripts/tests/scripts_regression_tests.py | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange index 57581e506f5c..57af3bbeacba 100755 --- a/scripts/Tools/xmlchange +++ b/scripts/Tools/xmlchange @@ -92,8 +92,8 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter return args.caseroot, listofsettings, args.file, args.id, args.val, args.subgroup, args.append, args.noecho, args.warn, args.force , args.dryrun -def xmlchange(caseroot, listofsettings=None, xmlid=None, xmlval=None, subgroup=None, - append=None, noecho=False, force=False , dryrun=False): +def xmlchange(caseroot, listofsettings, xmlid, xmlval, subgroup, + append, noecho, force, dryrun): with Case(caseroot, read_only=False) as case: env_mach_pes = case.get_env("mach_pes") env_mach_pes.set_components(case.get_values("COMP_CLASSES")) @@ -107,10 +107,10 @@ def xmlchange(caseroot, listofsettings=None, xmlid=None, xmlval=None, subgroup=N expect(len(pair) == 2 , "Expecting a key value pair in the form of key=value. Got %s" % (pair) ) (xmlid, xmlval) = pair type_str = case.get_type_info(xmlid) - if(append is True): + if append: value = case.get_value(xmlid, resolved=False, subgroup=subgroup) - xmlval = "%s %s" %(xmlval, value) + xmlval = "%s %s" % (value, xmlval) if not force: xmlval = convert_to_type(xmlval, type_str, xmlid) @@ -120,9 +120,9 @@ def xmlchange(caseroot, listofsettings=None, xmlid=None, xmlval=None, subgroup=N else : logger.warning("'%s' = '%s'" , xmlid , xmlval ) else: - if append is True: + if append: value = case.get_value(xmlid, resolved=False, subgroup=subgroup) - xmlval = "%s %s" %(xmlval, value) + xmlval = "%s %s" % (value, xmlval) type_str = case.get_type_info(xmlid) if not force: xmlval = convert_to_type(xmlval, type_str, xmlid) @@ -144,7 +144,7 @@ def _main_func(description): # pylint: disable=unused-variable caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, warn, force , dry = parse_command_line(sys.argv, description) - xmlchange(caseroot, listofsettings, xmlid, xmlval, subgroup, append, noecho, force , dry) + xmlchange(caseroot, listofsettings, xmlid, xmlval, subgroup, append, noecho, force, dry) if (__name__ == "__main__"): _main_func(__doc__) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 1d759cab0ef8..31a2d0475e98 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -1384,6 +1384,24 @@ def test_cime_case_force_pecount(self): expected_cores = 16 * case.cores_per_task self.assertEqual(case.get_value("TOTAL_CORES"), expected_cores) + ########################################################################### + def test_cime_case_xmlchange_append(self): + ########################################################################### + run_cmd_assert_result(self, "%s/create_test TESTRUNPASS_Mmpi-serial.f19_g16_rx1.A -t %s --no-build --test-root %s --output-root %s --force-procs 16 --force-threads 8" + % (SCRIPT_DIR, self._baseline_name, self._testroot, self._testroot)) + + casedir = os.path.join(self._testroot, + "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_Mmpi-serial_P16x8.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) + self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) + + run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt1'", from_dir=casedir) + 
result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir) + self.assertEqual(result, "-opt1") + + run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt2' --append", from_dir=casedir) + result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir) + self.assertEqual(result, "-opt1 -opt2") + ############################################################################### class X_TestSingleSubmit(TestCreateTestCommon): ############################################################################### From 0aedec30cd49c881d871e80203cf456dcd7a49ff Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 24 Apr 2017 10:54:32 -0600 Subject: [PATCH 121/219] Fix minor bug in test_scheduler --- scripts/lib/CIME/test_scheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index 9ab9184be576..7de420c615fd 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -585,7 +585,7 @@ def _run_catch_exceptions(self, test, phase, run): except (SystemExit, Exception) as e: exc_tb = sys.exc_info()[2] errput = "Test '%s' failed in phase '%s' with exception '%s'\n" % (test, phase, str(e)) - errput += traceback.format_tb(exc_tb) + errput += ''.join(traceback.format_tb(exc_tb)) self._log_output(test, errput) return False, errput From cc6d449500d76150841142dde6e372d968fb7781 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Mon, 24 Apr 2017 11:35:57 -0600 Subject: [PATCH 122/219] Fix merge problem --- src/drivers/mct/shr/seq_infodata_mod.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/drivers/mct/shr/seq_infodata_mod.F90 b/src/drivers/mct/shr/seq_infodata_mod.F90 index 0d874d503856..b54e1f8e0577 100644 --- a/src/drivers/mct/shr/seq_infodata_mod.F90 +++ b/src/drivers/mct/shr/seq_infodata_mod.F90 @@ -2576,7 +2576,7 @@ subroutine seq_infodata_Exchange(infodata,ID,type) call shr_mpi_bcast(infodata%precip_fact, mpicom, pebcast=cplpe) call shr_mpi_bcast(infodata%glcrun_alarm, mpicom, pebcast=cplpe) call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glc_valid_input, mpicom,pebcast=pebcast) + call shr_mpi_bcast(infodata%glc_valid_input, mpicom, pebcast=cplpe) call seq_infodata_pauseresume_bcast(infodata, mpicom, pebcast=cplpe) endif From 6ccce68d0d3ef44655d815a2ec7ee575024b2e55 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 24 Apr 2017 15:28:41 -0600 Subject: [PATCH 123/219] handle case when executable is none (scripts_regression_tests) --- scripts/lib/CIME/case.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 7a5e53466e13..d10088fe396d 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -149,7 +149,7 @@ def _initialize_derived_attributes(self): } executable = env_mach_spec.get_mpirun(self, mpi_attribs, job="case.run", exe_only=True)[0] - if "aprun" in executable: + if executable is not None and "aprun" in executable: self.num_nodes = get_aprun_cmd_for_case(self, "acme.exe")[1] self.spare_nodes = env_mach_pes.get_spare_nodes(self.num_nodes) self.num_nodes += self.spare_nodes @@ -1123,7 +1123,7 @@ def get_mpirun_cmd(self, job="case.run"): executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) # special case for aprun - if "aprun" in executable: + if executable is not None and "aprun" in executable: aprun_args, num_nodes = 
get_aprun_cmd_for_case(self, run_exe) expect(num_nodes == self.num_nodes, "Not using optimized num nodes") return executable + aprun_args + " " + run_misc_suffix From 919b7700c476b697af7aeb4752620c26e9ef1b01 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 24 Apr 2017 15:36:39 -0600 Subject: [PATCH 124/219] Fix big mistake in scripts_regression_tests Indent error lead to cdash always showing OK :( --- scripts/tests/scripts_regression_tests.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 31a2d0475e98..d5e99911a16e 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -2241,7 +2241,8 @@ def _main_func(): print "All pass, removing directory:", TEST_ROOT if os.path.exists(TEST_ROOT): shutil.rmtree(TEST_ROOT) - raise + + raise if (__name__ == "__main__"): _main_func() From de5b98897373ed9fe0d95f87bc41e2f32f099c5f Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 24 Apr 2017 16:56:59 -0600 Subject: [PATCH 125/219] PET must halve TASKS when doubling THRDS This is the only way both the single and 2-threads cases can run on titan under the same batch submission. Titan does not let you overload nodes. Also, ACME is making heavy use of PET, so it needs to be tested in cime_developer. --- scripts/lib/CIME/SystemTests/pet.py | 6 ++++++ scripts/lib/update_acme_tests.py | 1 + 2 files changed, 7 insertions(+) diff --git a/scripts/lib/CIME/SystemTests/pet.py b/scripts/lib/CIME/SystemTests/pet.py index d6a6718464ba..636f42b3e64b 100644 --- a/scripts/lib/CIME/SystemTests/pet.py +++ b/scripts/lib/CIME/SystemTests/pet.py @@ -30,6 +30,12 @@ def _case_one_setup(self): if self._case.get_value("NTHRDS_%s"%comp) <= 1: self._case.set_value("NTHRDS_%s"%comp, 2) + # Need to halve NTASKS since we have double the threads + ntasks = self._case.get_value("NTASKS_%s" % comp) + if ntasks > 1: + ntasks /= 2 + self._case.set_value("NTASKS_%s" % comp, ntasks) + # Need to redo case_setup because we may have changed the number of threads case_setup(self._case, reset=True) diff --git a/scripts/lib/update_acme_tests.py b/scripts/lib/update_acme_tests.py index e7592980beec..7b407ca88d04 100644 --- a/scripts/lib/update_acme_tests.py +++ b/scripts/lib/update_acme_tests.py @@ -48,6 +48,7 @@ "ERP.f45_g37_rx1.A", "SMS_D_Ln9.f19_g16_rx1.A", "DAE.f19_f19.A", + "PET.f19_f19.A", "SMS.T42_T42.S", "PRE.f45_g37_rx1.ADESP") ), From f7f0ff6019a24a83f454b660c35e6db047271617 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Tue, 25 Apr 2017 09:14:04 -0600 Subject: [PATCH 126/219] Remove glcrun_alarm from seq_infodata This used to be used by CLM, but is no longer needed. --- src/components/data_comps/dice/dice_comp_mod.F90 | 1 - src/components/data_comps/docn/docn_comp_mod.F90 | 1 - src/drivers/mct/main/cesm_comp_mod.F90 | 4 ---- src/drivers/mct/shr/seq_infodata_mod.F90 | 13 ++----------- 4 files changed, 2 insertions(+), 17 deletions(-) diff --git a/src/components/data_comps/dice/dice_comp_mod.F90 b/src/components/data_comps/dice/dice_comp_mod.F90 index ec31b04cdb7f..692b07c2c645 100644 --- a/src/components/data_comps/dice/dice_comp_mod.F90 +++ b/src/components/data_comps/dice/dice_comp_mod.F90 @@ -621,7 +621,6 @@ subroutine dice_comp_run( EClock, cdata, x2i, i2x) integer(IN) :: nl ! ice frac index integer(IN) :: lsize ! size of attr vect integer(IN) :: shrlogunit, shrloglev ! original log unit and level - logical :: glcrun_alarm ! 
is glc going to run now logical :: newdata ! has newdata been read logical :: mssrmlf ! remove old data integer(IN) :: idt ! integer timestep diff --git a/src/components/data_comps/docn/docn_comp_mod.F90 b/src/components/data_comps/docn/docn_comp_mod.F90 index 88cd3bc70a48..6a2bf18f4a00 100644 --- a/src/components/data_comps/docn/docn_comp_mod.F90 +++ b/src/components/data_comps/docn/docn_comp_mod.F90 @@ -539,7 +539,6 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) integer(IN) :: nl ! ocn frac index integer(IN) :: lsize ! size of attr vect integer(IN) :: shrlogunit, shrloglev ! original log unit and level - logical :: glcrun_alarm ! is glc going to run now logical :: newdata ! has newdata been read logical :: mssrmlf ! remove old data integer(IN) :: idt ! integer timestep diff --git a/src/drivers/mct/main/cesm_comp_mod.F90 b/src/drivers/mct/main/cesm_comp_mod.F90 index f4c1d517e39e..f9a8637fa1ad 100644 --- a/src/drivers/mct/main/cesm_comp_mod.F90 +++ b/src/drivers/mct/main/cesm_comp_mod.F90 @@ -2190,10 +2190,6 @@ subroutine cesm_run() if (tod == 0) t24hr_alarm = .true. if (month==1 .and. day==1 .and. tod==0) t1yr_alarm = .true. - ! TODO(wjs, 2017-04-05) I think glcrun_alarm can be removed from infodata: It used - ! to be needed by CLM, but no longer is needed. - call seq_infodata_putData(infodata, glcrun_alarm=glcrun_alarm) - if (seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_datestop)) then if (iamroot_CPLID) then write(logunit,*) ' ' diff --git a/src/drivers/mct/shr/seq_infodata_mod.F90 b/src/drivers/mct/shr/seq_infodata_mod.F90 index b54e1f8e0577..d62014fa87f4 100644 --- a/src/drivers/mct/shr/seq_infodata_mod.F90 +++ b/src/drivers/mct/shr/seq_infodata_mod.F90 @@ -237,7 +237,6 @@ MODULE seq_infodata_mod integer(SHR_KIND_IN) :: wav_phase ! wav phase integer(SHR_KIND_IN) :: esp_phase ! esp phase logical :: atm_aero ! atmosphere aerosols - logical :: glcrun_alarm ! glc run alarm logical :: glc_g2lupdate ! update glc2lnd fields in lnd model type(seq_pause_resume_type), pointer :: pause_resume => NULL() real(shr_kind_r8) :: max_cplstep_time ! abort if cplstep time exceeds this value @@ -711,7 +710,6 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid) infodata%rof_phase = 1 infodata%wav_phase = 1 infodata%atm_aero = .false. - infodata%glcrun_alarm = .false. infodata%glc_g2lupdate = .false. infodata%glc_valid_input = .true. if (associated(infodata%pause_resume)) then @@ -916,7 +914,7 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ ice_gnam, rof_gnam, glc_gnam, wav_gnam, & atm_gnam, ocn_gnam, info_debug, dead_comps, read_restart, & shr_map_dopole, vect_map, aoflux_grid, flux_epbalfact, & - nextsw_cday, precip_fact, flux_epbal, flux_albav, glcrun_alarm, & + nextsw_cday, precip_fact, flux_epbal, flux_albav, & glc_g2lupdate, atm_aero, run_barriers, esmf_map_flag, & do_budgets, do_histinit, drv_threading, flux_diurnal, gust_fac, & budget_inst, budget_daily, budget_month, wall_time_limit, & @@ -1097,7 +1095,6 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ integer(SHR_KIND_IN), optional, intent(OUT) :: wav_phase ! wav phase integer(SHR_KIND_IN), optional, intent(OUT) :: esp_phase ! wav phase logical, optional, intent(OUT) :: atm_aero ! atmosphere aerosols - logical, optional, intent(OUT) :: glcrun_alarm ! glc run alarm logical, optional, intent(OUT) :: glc_g2lupdate ! 
update glc2lnd fields in lnd model real(shr_kind_r8), optional, intent(out) :: max_cplstep_time logical, optional, intent(OUT) :: glc_valid_input @@ -1280,7 +1277,6 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ if ( present(wav_phase) ) wav_phase = infodata%wav_phase if ( present(esp_phase) ) esp_phase = infodata%esp_phase if ( present(atm_aero) ) atm_aero = infodata%atm_aero - if ( present(glcrun_alarm) ) glcrun_alarm = infodata%glcrun_alarm if ( present(glc_g2lupdate) ) glc_g2lupdate = infodata%glc_g2lupdate if ( present(atm_resume) ) then if (associated(infodata%pause_resume)) then @@ -1478,7 +1474,7 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ ice_gnam, rof_gnam, glc_gnam, wav_gnam, & atm_gnam, ocn_gnam, info_debug, dead_comps, read_restart, & shr_map_dopole, vect_map, aoflux_grid, run_barriers, & - nextsw_cday, precip_fact, flux_epbal, flux_albav, glcrun_alarm, & + nextsw_cday, precip_fact, flux_epbal, flux_albav, & glc_g2lupdate, atm_aero, esmf_map_flag, wall_time_limit, & do_budgets, do_histinit, drv_threading, flux_diurnal, gust_fac, & budget_inst, budget_daily, budget_month, force_stop_at, & @@ -1658,7 +1654,6 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ integer(SHR_KIND_IN), optional, intent(IN) :: wav_phase ! wav phase integer(SHR_KIND_IN), optional, intent(IN) :: esp_phase ! esp phase logical, optional, intent(IN) :: atm_aero ! atm aerosols - logical, optional, intent(IN) :: glcrun_alarm ! glc run alarm logical, optional, intent(IN) :: glc_g2lupdate ! update glc2lnd fields in lnd model logical, optional, intent(IN) :: glc_valid_input character(SHR_KIND_CL), optional, intent(IN) :: atm_resume(:) ! atm resume @@ -1828,7 +1823,6 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ if ( present(wav_phase) ) infodata%wav_phase = wav_phase if ( present(esp_phase) ) infodata%esp_phase = esp_phase if ( present(atm_aero) ) infodata%atm_aero = atm_aero - if ( present(glcrun_alarm) ) infodata%glcrun_alarm = glcrun_alarm if ( present(glc_g2lupdate) ) infodata%glc_g2lupdate = glc_g2lupdate if ( present(glc_valid_input) ) infodata%glc_valid_input = glc_valid_input if ( present(atm_resume) ) then @@ -2247,7 +2241,6 @@ subroutine seq_infodata_bcast(infodata,mpicom) call shr_mpi_bcast(infodata%rof_phase, mpicom) call shr_mpi_bcast(infodata%wav_phase, mpicom) call shr_mpi_bcast(infodata%atm_aero, mpicom) - call shr_mpi_bcast(infodata%glcrun_alarm, mpicom) call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom) call shr_mpi_bcast(infodata%glc_valid_input, mpicom) @@ -2574,7 +2567,6 @@ subroutine seq_infodata_Exchange(infodata,ID,type) if (cpl2r) then call shr_mpi_bcast(infodata%nextsw_cday, mpicom, pebcast=cplpe) call shr_mpi_bcast(infodata%precip_fact, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glcrun_alarm, mpicom, pebcast=cplpe) call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom, pebcast=cplpe) call shr_mpi_bcast(infodata%glc_valid_input, mpicom, pebcast=cplpe) call seq_infodata_pauseresume_bcast(infodata, mpicom, pebcast=cplpe) @@ -2919,7 +2911,6 @@ SUBROUTINE seq_infodata_print( infodata ) write(logunit,F0S) subname,'rof_phase = ', infodata%rof_phase write(logunit,F0S) subname,'wav_phase = ', infodata%wav_phase - write(logunit,F0L) subname,'glcrun_alarm = ', infodata%glcrun_alarm write(logunit,F0L) subname,'glc_g2lupdate = ', infodata%glc_g2lupdate if (associated(infodata%pause_resume)) then do ind = 1, num_inst_atm From 
0ed54b217bbae3b384a952c355b0a34c607719cb Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 25 Apr 2017 12:49:13 -0600 Subject: [PATCH 127/219] Double nodes instead of halving tasks --- scripts/lib/CIME/SystemTests/pet.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/scripts/lib/CIME/SystemTests/pet.py b/scripts/lib/CIME/SystemTests/pet.py index 636f42b3e64b..3557420189e5 100644 --- a/scripts/lib/CIME/SystemTests/pet.py +++ b/scripts/lib/CIME/SystemTests/pet.py @@ -30,11 +30,9 @@ def _case_one_setup(self): if self._case.get_value("NTHRDS_%s"%comp) <= 1: self._case.set_value("NTHRDS_%s"%comp, 2) - # Need to halve NTASKS since we have double the threads - ntasks = self._case.get_value("NTASKS_%s" % comp) - if ntasks > 1: - ntasks /= 2 - self._case.set_value("NTASKS_%s" % comp, ntasks) + # Subtle: machines like titan will not let you overload the number of threads + # on a node. + self._case.num_nodes *= 2 # Need to redo case_setup because we may have changed the number of threads case_setup(self._case, reset=True) From 07194c738585ecb8dc7099433ec9227c3c6c2aa0 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Tue, 25 Apr 2017 15:17:50 -0600 Subject: [PATCH 128/219] Fixed Some Issues Brought Up in Discussion I cleaned up some of the logic as per @billsacks. Fixed some flawed logic which resulted in wrong value assignment. restored original function call for _expect_char Fixed #1397 --- scripts/lib/CIME/namelist.py | 56 ++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index 52a25b44fc8f..26f70d4f810f 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -987,9 +987,12 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): >>> x.set_variable_value('foo', 'bar(2)', [u'3'], var_size=4) >>> x.get_variable_value('foo', 'bar') [u'1', u'3'] - >>> x.set_variable_value('foo', 'bar', [u'2']) + >>> x.set_variable_value('foo', 'bar(1)', [u'2']) >>> x.get_variable_value('foo', 'bar') [u'2', u'3'] + >>> x.set_variable_value('foo', 'bar', [u'1']) + >>> x.get_variable_value('foo', 'bar') + [u'1'] >>> x.set_variable_value('foo', 'bazz', [u'3']) >>> x.set_variable_value('Brack', 'baR', [u'4']) >>> x.get_variable_value('foo', 'bazz') @@ -1003,6 +1006,7 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): group_name = group_name.lower() minindex, maxindex, step = get_fortran_variable_indices(variable_name, var_size) + original_var = variable_name variable_name = get_fortran_name_only(variable_name.lower()) expect(minindex > 0, "Indices < 1 not supported in CIME interface to fortran namelists... 
lower bound=%s"%minindex) @@ -1019,12 +1023,16 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): if minindex > tlen: self._groups[group_name][variable_name].extend(['']*(minindex-tlen-1)) - for i in range(minindex, maxindex+2*step, step): - while len(self._groups[group_name][variable_name]) < i: - self._groups[group_name][variable_name].append('') - self._groups[group_name][variable_name][i-1] = value.pop(0) - if len(value) == 0: - break + # only replace items which are in index notation + if FORTRAN_NAME_REGEX.search(original_var).group(2) is not None: + for i in range(minindex, maxindex+2*step, step): + while len(self._groups[group_name][variable_name]) < i: + self._groups[group_name][variable_name].append('') + self._groups[group_name][variable_name][i-1] = value.pop(0) + if len(value) == 0: + break + else: + self._groups[group_name][variable_name] = value def delete_variable(self, group_name, variable_name): @@ -1440,7 +1448,7 @@ def _eat_comment(self): self._advance() return True - def _expect_char(self, chars, RETURN=False): + def _expect_char(self, chars): """Raise an error if the wrong character is present. Does not return anything, but raises a `_NamelistParseError` if `chars` @@ -1458,17 +1466,13 @@ def _expect_char(self, chars, RETURN=False): _NamelistParseError: Error in parsing namelist: expected 'a' but found 'b' >>> x._expect_char('ab') """ - if self._curr() not in chars and not RETURN: + if self._curr() not in chars: if len(chars) == 1: char_description = repr(str(chars)) else: char_description = "one of the characters in %r" % str(chars) raise _NamelistParseError("expected %s but found %r" % (char_description, str(self._curr()))) - elif self._curr() in chars and RETURN: - return True - elif self._curr() not in chars and RETURN: - return False def _parse_namelist_group_name(self): r"""Parses and returns a namelist group name at the current position. @@ -1667,7 +1671,7 @@ def _look_ahead_for_equals(self, pos): return False def _look_ahead_for_plusequals(self, pos): - r"""Look ahead to see if the next whitespace character is '='. + r"""Look ahead to see if the next two non-whitespace character are '+='. The `pos` argument is the position in the text to start from while looking. This function returns a boolean. @@ -1824,7 +1828,7 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): separators = [' ', '\n', ',', '/'] if allow_name: separators.append('=') - separators.append('+=') + separators.append('+') while new_pos != self._len and self._text[new_pos] not in separators: # allow commas if they are inside () if self._text[new_pos] == '(': @@ -1957,7 +1961,8 @@ def _parse_name_and_values(self, allow_eof_end=False): r"""Parse and return a variable name and values assigned to that name. The return value of this function is a tuple containing (a) the name of - the variable in a string, and (b) a list of the variable's values. Null + the variable in a string, (b) a list of the variable's values, and + (c) whether or not to add the found value to existing variable. Null values are represented by the empty string. 
If `allow_eof_end=True`, the end of the sequence of values might come @@ -2018,7 +2023,7 @@ def _parse_name_and_values(self, allow_eof_end=False): self._eat_whitespace() # check to see if we have a "+=" - if self._expect_char("+", RETURN=True): + if self._curr() == '+': self._advance() addto=True # tell parser that we want to add to dictionary values self._expect_char("=") @@ -2103,9 +2108,11 @@ def _parse_namelist_group(self): name, values, addto = self._parse_name_and_values() dsettings = [] if self._groupless: - if name in self._settings and not addto: - values = merge_literal_lists(self._settings[name], values) - elif name in self._settings and addto: + if name in self._settings: + dsettings = self._settings[name] + if not addto: + values = merge_literal_lists(dsettings, values) + else: values = self._settings[name] + values self._settings[name] = values else: @@ -2169,10 +2176,11 @@ def parse_namelist(self): if self._groupless and self._curr() != '&': while self._pos < self._len: name, values, addto = self._parse_name_and_values(allow_eof_end=True) - if name in self._settings and not addto: - values = merge_literal_lists(self._settings[name], values) - elif name in self._settings and addto: - values = self._settings[name] + values + if name in self._settings: + if addto: + values = self._settings[name] + values + else: + values = merge_literal_lists(self._settings[name], values) self._settings[name] = values return self._settings # Loop over namelist groups in the file. From 975958dcaeff22680dd96a279986c860d7ace8cc Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 25 Apr 2017 15:30:26 -0600 Subject: [PATCH 129/219] Re-initialize key case values upon case.setup --- scripts/lib/CIME/SystemTests/pet.py | 4 ---- scripts/lib/CIME/case.py | 6 +++--- scripts/lib/CIME/case_setup.py | 8 +++++--- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/scripts/lib/CIME/SystemTests/pet.py b/scripts/lib/CIME/SystemTests/pet.py index 3557420189e5..d6a6718464ba 100644 --- a/scripts/lib/CIME/SystemTests/pet.py +++ b/scripts/lib/CIME/SystemTests/pet.py @@ -30,10 +30,6 @@ def _case_one_setup(self): if self._case.get_value("NTHRDS_%s"%comp) <= 1: self._case.set_value("NTHRDS_%s"%comp, 2) - # Subtle: machines like titan will not let you overload the number of threads - # on a node. 
- self._case.num_nodes *= 2 - # Need to redo case_setup because we may have changed the number of threads case_setup(self._case, reset=True) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index d10088fe396d..8d7308054aa1 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -107,7 +107,7 @@ def __init__(self, case_root=None, read_only=True): self.cores_per_task = None # check if case has been configured and if so initialize derived if self.get_value("CASEROOT") is not None: - self._initialize_derived_attributes() + self.initialize_derived_attributes() def check_if_comp_var(self, vid): vid = vid @@ -119,7 +119,7 @@ def check_if_comp_var(self, vid): return vid, comp, iscompvar return vid, comp, iscompvar - def _initialize_derived_attributes(self): + def initialize_derived_attributes(self): """ These are derived variables which can be used in the config_* files for variable substitution using the {{ var }} syntax @@ -809,7 +809,7 @@ def configure(self, compset_name, grid_name, machine_name=None, if test: self.set_value("TEST",True) - self._initialize_derived_attributes() + self.initialize_derived_attributes() # Make sure that parallel IO is not specified if total_tasks==1 if self.total_tasks == 1: diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index 087c5999b381..bbd743501a1e 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -136,9 +136,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, else: expect(False, "NINST_%s value %d greater than NTASKS_%s %d" % (comp, ninst, comp, ntasks)) - # Set TOTAL_CORES - case.set_value("TOTAL_CORES", case.total_tasks * case.cores_per_task ) - if os.path.exists("case.run"): logger.info("Machine/Decomp/Pes configuration has already been done ...skipping") else: @@ -187,6 +184,11 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, logger.debug("at copy TOTALPES = %s"%case.get_value("TOTALPES")) lock_file("env_mach_pes.xml") + # Set TOTAL_CORES + case.set_value("TOTAL_CORES", case.total_tasks * case.cores_per_task ) + + case.initialize_derived_attributes() + # Create user_nl files for the required number of instances if not os.path.exists("user_nl_cpl"): logger.info("Creating user_nl_xxx files for components and cpl") From 4cb307589f7bb6180d3cf26f9c2a02055e2dbff6 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 25 Apr 2017 16:00:31 -0600 Subject: [PATCH 130/219] Order of operations was not correct --- scripts/lib/CIME/case_setup.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index bbd743501a1e..9baa2d215a77 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -138,6 +138,8 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, if os.path.exists("case.run"): logger.info("Machine/Decomp/Pes configuration has already been done ...skipping") + + case.initialize_derived_attributes() else: check_pelayouts_require_rebuild(case, models) @@ -156,6 +158,8 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value("MACH")) case.set_value("COST_PES", cost_pes) + case.initialize_derived_attributes() + # create batch files logger.info("Creating batch script case.run") env_batch = case.get_env("batch") @@ -187,8 +191,6 @@ def _case_setup_impl(case, caseroot, 
clean=False, test_mode=False, reset=False, # Set TOTAL_CORES case.set_value("TOTAL_CORES", case.total_tasks * case.cores_per_task ) - case.initialize_derived_attributes() - # Create user_nl files for the required number of instances if not os.path.exists("user_nl_cpl"): logger.info("Creating user_nl_xxx files for components and cpl") From 56aba434897ac6df2c8b255ac45a1976359c6d2a Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 25 Apr 2017 16:03:41 -0600 Subject: [PATCH 131/219] PIO settings need to happen before init_derived_attributes --- scripts/lib/CIME/case_setup.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index 9baa2d215a77..5afc3979bf01 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -158,6 +158,10 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value("MACH")) case.set_value("COST_PES", cost_pes) + # Make sure pio settings are consistent + if adjust_pio: + adjust_pio_layout(case, tasks_per_node) + case.initialize_derived_attributes() # create batch files @@ -177,10 +181,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, logger.info("Writing %s script from input template %s" % (job, input_batch_script)) env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count) - # Make sure pio settings are consistant - if adjust_pio: - adjust_pio_layout(case, tasks_per_node) - # Make a copy of env_mach_pes.xml in order to be able # to check that it does not change once case.setup is invoked logger.info("Locking file env_mach_pes.xml") From 97450b033df7c2015bbd2eae08d2cbd7a386ac50 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Tue, 25 Apr 2017 16:05:37 -0600 Subject: [PATCH 132/219] xmlchange now supports file option --- scripts/Tools/xmlchange | 24 ++++++++++----- scripts/lib/CIME/XML/generic_xml.py | 3 +- scripts/lib/CIME/case.py | 46 +++++++++++++++++++++++++++-- 3 files changed, 61 insertions(+), 12 deletions(-) diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange index 57af3bbeacba..31fbe7559d5e 100755 --- a/scripts/Tools/xmlchange +++ b/scripts/Tools/xmlchange @@ -14,6 +14,7 @@ from standard_script_setup import * from CIME.utils import expect, convert_to_type, append_case_status from CIME.case import Case +from CIME.XML.generic_xml import GenericXML import re @@ -24,7 +25,7 @@ logger = logging.getLogger("xmlchange") def parse_command_line(args, description): ############################################################################### parser = argparse.ArgumentParser( - usage="""\n%s [] [--verbose][--file file][--id id][--val value][--noecho][--append][--warn][--force] + usage="""\n%s [] [--verbose][--file file][--id id][--val value][--noecho][--append][--force] OR %s --help OR @@ -72,9 +73,6 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-append","--append", action="store_true", help="append to the existing value") - parser.add_argument("-warn","--warn", action="store_true", - help="not implemented") - parser.add_argument("-subgroup","--subgroup", help="apply to this subgroup only") @@ -90,11 +88,21 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter delimiter = re.escape(args.delimiter) listofsettings = re.split(r'(? 
0 else 0) # pylint: disable=unused-variable - caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, warn, force , dry = parse_command_line(sys.argv, description) + caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, force , dry = parse_command_line(sys.argv, description) - xmlchange(caseroot, listofsettings, xmlid, xmlval, subgroup, append, noecho, force, dry) + xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, force, dry) if (__name__ == "__main__"): _main_func(__doc__) diff --git a/scripts/lib/CIME/XML/generic_xml.py b/scripts/lib/CIME/XML/generic_xml.py index d40736d6e9b5..717e4b17bdd0 100644 --- a/scripts/lib/CIME/XML/generic_xml.py +++ b/scripts/lib/CIME/XML/generic_xml.py @@ -294,4 +294,5 @@ def get_raw_record(self, root=None): expect(False, "Could not write file %s, xml formatting error '%s'" % (self.filename, e)) return xmlstr - + def get_id(self): + return self.root.get("id") diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index d10088fe396d..579485b948da 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -200,13 +200,14 @@ def get_case_root(self): """Returns the root directory for this case.""" return self._caseroot - def get_env(self, short_name): + def get_env(self, short_name, allow_missing=False): full_name = "env_%s.xml" % (short_name) for env_file in self._files: if os.path.basename(env_file.filename) == full_name: return env_file - - expect(False, "Could not find object for %s in case"%full_name) + if allow_missing: + return None + expect(False,"Could not find object for %s in case"%full_name) def copy(self, newcasename, newcaseroot, newcimeroot=None, newsrcroot=None): newcase = deepcopy(self) @@ -370,6 +371,11 @@ def set_value(self, item, value, subgroup=None, ignore_type=False): if item == "CASEROOT": self._caseroot = value result = None + + env_test = self.get_env("test", allow_missing=True) + if env_test: + chkval = env_test.get_test_parameter(item) + for env_file in self._env_entryid_files: result = env_file.set_value(item, value, subgroup, ignore_type) if (result is not None): @@ -1192,3 +1198,37 @@ def _check_testlists(self, compset_alias, grid_name, files): expect(False, "\nThis compset and grid combination is untested in CESM. 
" "Override this warning with the --run-unsupported option to create_newcase.", error_prefix="STOP: ") + + def set_file(self, xmlfile, ftype): + new_env_file = None + for idx, env_file in enumerate(self._env_entryid_files): + if os.path.basename(env_file.filename) == ftype: + if ftype == "env_run.xml": + new_env_file = EnvRun(infile=xmlfile) + elif ftype == "env_build.xml": + new_env_file = EnvBuild(infile=xmlfile) + elif ftype == "env_case.xml": + new_env_file = EnvCase(infile=xmlfile) + elif ftype == "env_mach_pes.xml": + new_env_file = EnvMachPes(infile=xmlfile) + elif ftype == "env_batch.xml": + new_env_file = EnvBatch(infile=xmlfile) + elif ftype == "env_test.xml": + new_env_file = EnvTest(infile=xmlfile) + if new_env_file is not None: + self._env_entryid_files[idx] = new_env_file + break + if new_env_file is None: + for idx, env_file in enumerate(self._env_generic_files): + if os.path.basename(env_file.filename) == ftype: + if ftype == "env_archive.xml": + new_env_file = EnvArchive(infile=xmlfile) + elif ftype == "env_mach_specific.xml": + new_env_file = EnvMachSpecific(infile=xmlfile) + else: + expect(False, "No match found for file type %s"%ftype) + if new_env_file is not None: + self._env_generic_files[idx] = new_env_file + break + + self._files = self._env_entryid_files + self._env_generic_files From 970452cd2bc834cc67402d2ddc8f7600b24165bf Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Tue, 25 Apr 2017 17:33:25 -0600 Subject: [PATCH 133/219] working with xmlquery --- scripts/Tools/xmlchange | 8 +------- scripts/Tools/xmlquery | 8 +++++--- scripts/lib/CIME/case.py | 8 +++++++- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange index 31fbe7559d5e..234c14d94efb 100755 --- a/scripts/Tools/xmlchange +++ b/scripts/Tools/xmlchange @@ -14,7 +14,6 @@ from standard_script_setup import * from CIME.utils import expect, convert_to_type, append_case_status from CIME.case import Case -from CIME.XML.generic_xml import GenericXML import re @@ -96,12 +95,7 @@ def xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, with Case(caseroot, read_only=False) as case: if xmlfile: - expect(os.path.isfile(xmlfile), "Could not find file %s"%xmlfile) - gfile = GenericXML(infile=xmlfile) - ftype = gfile.get_id() - if os.path.abspath(os.path.join(caseroot,ftype)) != os.path.abspath(xmlfile): - logger.warn("setting case file to %s"%xmlfile) - case.set_file(xmlfile, ftype) + case.set_file(xmlfile) env_mach_pes = case.get_env("mach_pes") env_mach_pes.set_components(case.get_values("COMP_CLASSES")) diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery index 0b3177004dc4..53db37526422 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -63,7 +63,7 @@ epilog=textwrap.dedent(__doc__)) help="variable name in env_*.xml file ( value )") parser.add_argument("-file" , "--file", - help="deprecated option, do not use") + help="specify the file you want to query") parser.add_argument("-subgroup","--subgroup", help="apply to this subgroup only") @@ -120,7 +120,7 @@ epilog=textwrap.dedent(__doc__)) return variables, args.subgroup, args.caseroot, args.listall, args.fileonly, \ args.value, args.no_resolve, args.raw, args.description, args.group, args.full, \ - args.type, args.valid_values, args.partial_match + args.type, args.valid_values, args.partial_match, args.file def get_value_as_string(case, var, attribute=None, resolved=False, subgroup=None): thistype = case.get_type_info(var) @@ -189,10 +189,12 @@ def _main_func(): # 
Initialize command line parser and get command line options variables, subgroup, caseroot, listall, fileonly, \ value, no_resolve, raw, description, group, full, dtype, \ - valid_values, partial_match = parse_command_line(sys.argv) + valid_values, partial_match, xmlfile = parse_command_line(sys.argv) # Initialize case ; read in all xml files from caseroot with Case(caseroot) as case: + if xmlfile: + case.set_file(xmlfile) if listall or partial_match: all_variables = sorted(case.get_record_fields(None, "varid")) if partial_match: diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 579485b948da..68b972686467 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -30,6 +30,7 @@ from CIME.XML.env_run import EnvRun from CIME.XML.env_archive import EnvArchive from CIME.XML.env_batch import EnvBatch +from CIME.XML.generic_xml import GenericXML from CIME.user_mod_support import apply_user_mods from CIME.case_setup import case_setup from CIME.aprun import get_aprun_cmd_for_case @@ -1199,7 +1200,12 @@ def _check_testlists(self, compset_alias, grid_name, files): "Override this warning with the --run-unsupported option to create_newcase.", error_prefix="STOP: ") - def set_file(self, xmlfile, ftype): + def set_file(self, xmlfile): + expect(os.path.isfile(xmlfile), "Could not find file %s"%xmlfile) + gfile = GenericXML(infile=xmlfile) + ftype = gfile.get_id() + + logger.warn("setting case file to %s"%xmlfile) new_env_file = None for idx, env_file in enumerate(self._env_entryid_files): if os.path.basename(env_file.filename) == ftype: From 6401d135c1226b4ee14c53f33d386793347ae32b Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Tue, 25 Apr 2017 17:54:48 -0600 Subject: [PATCH 134/219] if --file option is used that should be the only file considered --- scripts/lib/CIME/case.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 68b972686467..3cfd2e15ca64 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -373,10 +373,6 @@ def set_value(self, item, value, subgroup=None, ignore_type=False): self._caseroot = value result = None - env_test = self.get_env("test", allow_missing=True) - if env_test: - chkval = env_test.get_test_parameter(item) - for env_file in self._env_entryid_files: result = env_file.set_value(item, value, subgroup, ignore_type) if (result is not None): @@ -1201,7 +1197,11 @@ def _check_testlists(self, compset_alias, grid_name, files): error_prefix="STOP: ") def set_file(self, xmlfile): + """ + force the case object to consider only xmlfile + """ expect(os.path.isfile(xmlfile), "Could not find file %s"%xmlfile) + gfile = GenericXML(infile=xmlfile) ftype = gfile.get_id() @@ -1222,7 +1222,9 @@ def set_file(self, xmlfile): elif ftype == "env_test.xml": new_env_file = EnvTest(infile=xmlfile) if new_env_file is not None: - self._env_entryid_files[idx] = new_env_file + self._env_entryid_files = [] + self._env_generic_files = [] + self._env_entryid_files.append(new_env_file) break if new_env_file is None: for idx, env_file in enumerate(self._env_generic_files): @@ -1234,7 +1236,9 @@ def set_file(self, xmlfile): else: expect(False, "No match found for file type %s"%ftype) if new_env_file is not None: - self._env_generic_files[idx] = new_env_file + self._env_entryid_files = [] + self._env_generic_files = [] + self._env_generic_files.append(new_env_file) break self._files = self._env_entryid_files + self._env_generic_files From 
a8d726234442d1a4b075c37fc02646fe570c7107 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Tue, 25 Apr 2017 19:53:08 -0600 Subject: [PATCH 135/219] xmlquery works --- scripts/Tools/xmlchange | 8 ++++---- scripts/Tools/xmlquery | 12 ++++++++---- scripts/lib/CIME/XML/env_test.py | 19 +++++++++++++++++++ scripts/lib/CIME/case.py | 3 ++- 4 files changed, 33 insertions(+), 9 deletions(-) diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange index 234c14d94efb..a5b2ddcefa38 100755 --- a/scripts/Tools/xmlchange +++ b/scripts/Tools/xmlchange @@ -96,9 +96,9 @@ def xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, if xmlfile: case.set_file(xmlfile) - - env_mach_pes = case.get_env("mach_pes") - env_mach_pes.set_components(case.get_values("COMP_CLASSES")) + else: + env_mach_pes = case.get_env("mach_pes") + env_mach_pes.set_components(case.get_values("COMP_CLASSES")) if len(listofsettings): logger.debug("List of attributes to change: %s" , listofsettings) @@ -113,7 +113,7 @@ def xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, value = case.get_value(xmlid, resolved=False, subgroup=subgroup) xmlval = "%s %s" % (value, xmlval) - if not force: + if type_str is not None and not force: xmlval = convert_to_type(xmlval, type_str, xmlid) if not dryrun : diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery index 53db37526422..ac26bb769553 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -131,7 +131,7 @@ def get_value_as_string(case, var, attribute=None, resolved=False, subgroup=None def xmlquery(case, variables, subgroup=None, fileonly=False, resolved=True, raw=False, description=False, group=False, - full=False, dtype=False, valid_values=False): + full=False, dtype=False, valid_values=False, xmlfile=None): """ Return list of attributes and their values, print formatted @@ -143,8 +143,12 @@ def xmlquery(case, variables, subgroup=None, fileonly=False, groups = [subgroup] else: groups = case.get_record_fields(var, "group") - - expect(groups, " No results found for variable %s"%var) + if xmlfile and not groups: + results['none'] = {} + results['none'][var] = {} + results['none'][var]['value'] = case.get_value(var, resolved=resolved) + else: + expect(groups, " No results found for variable %s"%var) for group in groups: if not group in results: results[group] = {} @@ -228,7 +232,7 @@ def _main_func(): expect(variables, "No variables found") results = xmlquery(case, variables, subgroup, fileonly, resolved=not no_resolve, raw=raw, description=description, group=group, full=full, - dtype=dtype, valid_values=valid_values) + dtype=dtype, valid_values=valid_values, xmlfile=xmlfile) if full or description: wrapper=textwrap.TextWrapper() diff --git a/scripts/lib/CIME/XML/env_test.py b/scripts/lib/CIME/XML/env_test.py index 44f947ec47f3..3c7461c74f44 100644 --- a/scripts/lib/CIME/XML/env_test.py +++ b/scripts/lib/CIME/XML/env_test.py @@ -103,3 +103,22 @@ def cleanupnode(self, node): if dnode is not None: node.remove(dnode) return node + + def set_value(self, vid, value, subgroup=None, ignore_type=False): + """ + check if vid is in test section of file + """ + newval = EnvBase.set_value(self, vid, value, subgroup, ignore_type) + if newval is None: + tnode = self.get_optional_node("test") + if tnode is not None: + newval = self.set_element_text(vid, value, root=tnode) + return newval + + def get_value(self, vid, attribute=None, resolved=True, subgroup=None): + value = EnvBase.get_value(self, vid, attribute, resolved, subgroup) + if value is None: + 
tnode = self.get_optional_node("test") + if tnode is not None: + value = self.get_element_text(vid, root=tnode) + return value diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 3cfd2e15ca64..2dfba1ff7589 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -283,7 +283,8 @@ def get_value(self, item, attribute=None, resolved=True, subgroup=None): if resolved and type(result) is str: result = self.get_resolved_value(result) vtype = env_file.get_type_info(item) - result = convert_to_type(result, vtype, item) + if vtype is not None: + result = convert_to_type(result, vtype, item) return result for env_file in self._env_generic_files: From 1ac37dd9f484cc33831abd4a040e6e0511e42745 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 26 Apr 2017 06:20:35 -0600 Subject: [PATCH 136/219] fix pylint issue --- scripts/lib/CIME/XML/env_mach_specific.py | 2 +- scripts/lib/CIME/case.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/lib/CIME/XML/env_mach_specific.py b/scripts/lib/CIME/XML/env_mach_specific.py index 226f199c377e..8e3ef188c71e 100644 --- a/scripts/lib/CIME/XML/env_mach_specific.py +++ b/scripts/lib/CIME/XML/env_mach_specific.py @@ -13,7 +13,7 @@ # get_type) otherwise need to implement own functions and make GenericXML parent class class EnvMachSpecific(EnvBase): # pylint: disable=unused-argument - def __init__(self, caseroot, infile="env_mach_specific.xml", + def __init__(self, caseroot=None, infile="env_mach_specific.xml", components=None, unit_testing=False): """ initialize an object interface to file env_mach_specific.xml in the case directory diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 2dfba1ff7589..96c23fa42698 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -1208,7 +1208,7 @@ def set_file(self, xmlfile): logger.warn("setting case file to %s"%xmlfile) new_env_file = None - for idx, env_file in enumerate(self._env_entryid_files): + for env_file in self._env_entryid_files: if os.path.basename(env_file.filename) == ftype: if ftype == "env_run.xml": new_env_file = EnvRun(infile=xmlfile) @@ -1228,7 +1228,7 @@ def set_file(self, xmlfile): self._env_entryid_files.append(new_env_file) break if new_env_file is None: - for idx, env_file in enumerate(self._env_generic_files): + for env_file in self._env_generic_files: if os.path.basename(env_file.filename) == ftype: if ftype == "env_archive.xml": new_env_file = EnvArchive(infile=xmlfile) From d75641a2615b1f04f8c7729057b3871a1d5010d4 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 26 Apr 2017 11:13:01 -0600 Subject: [PATCH 137/219] Fix mistake --- scripts/lib/CIME/case_setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index 5afc3979bf01..ee45013755d0 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -159,6 +159,7 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, case.set_value("COST_PES", cost_pes) # Make sure pio settings are consistent + tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count) if adjust_pio: adjust_pio_layout(case, tasks_per_node) @@ -168,7 +169,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, logger.info("Creating batch script case.run") env_batch = case.get_env("batch") num_nodes = case.num_nodes - tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count) for job in 
env_batch.get_jobs(): input_batch_script = os.path.join(case.get_value("MACHDIR"), env_batch.get_value('template', subgroup=job)) if job == "case.test" and testcase is not None and not test_mode: From d9d0f664aa143c61fb0b03808e9538f5eff1b3ce Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 26 Apr 2017 11:45:54 -0600 Subject: [PATCH 138/219] TOTAL_CORES needs to be set before xml files are locked --- scripts/lib/CIME/case_setup.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index ee45013755d0..e12db54cb3b8 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -140,6 +140,9 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, logger.info("Machine/Decomp/Pes configuration has already been done ...skipping") case.initialize_derived_attributes() + + # Set TOTAL_CORES + case.set_value("TOTAL_CORES", case.total_tasks * case.cores_per_task ) else: check_pelayouts_require_rebuild(case, models) @@ -181,6 +184,9 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, logger.info("Writing %s script from input template %s" % (job, input_batch_script)) env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count) + # Set TOTAL_CORES + case.set_value("TOTAL_CORES", case.total_tasks * case.cores_per_task ) + # Make a copy of env_mach_pes.xml in order to be able # to check that it does not change once case.setup is invoked logger.info("Locking file env_mach_pes.xml") @@ -188,9 +194,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, logger.debug("at copy TOTALPES = %s"%case.get_value("TOTALPES")) lock_file("env_mach_pes.xml") - # Set TOTAL_CORES - case.set_value("TOTAL_CORES", case.total_tasks * case.cores_per_task ) - # Create user_nl files for the required number of instances if not os.path.exists("user_nl_cpl"): logger.info("Creating user_nl_xxx files for components and cpl") From b84e255ee766367ee36486c8413748b55eeb9548 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 26 Apr 2017 11:40:10 -0600 Subject: [PATCH 139/219] nag needs a lib pointer --- config/cesm/machines/config_compilers.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 2abaae9853af..2c659ea47095 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -701,6 +701,10 @@ using a fortran linker. -lpthread + /home/santos/pFUnit/pFUnit_NAG_3_0 + + -L/usr/local/nag/lib/NAG_Fortran + From 14eeffba8a9d68db91db893857793fd368310d2c Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 26 Apr 2017 13:00:44 -0600 Subject: [PATCH 140/219] remove broken pfunit directory --- config/cesm/machines/config_compilers.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 2c659ea47095..e33594c56ef2 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -701,7 +701,6 @@ using a fortran linker. 
-lpthread - /home/santos/pFUnit/pFUnit_NAG_3_0 -L/usr/local/nag/lib/NAG_Fortran From a5cfd299eb628ac406bc5ef3baaece6b13f0adc7 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 26 Apr 2017 13:03:28 -0600 Subject: [PATCH 141/219] Change PET test to fit on desktops --- scripts/lib/update_acme_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/update_acme_tests.py b/scripts/lib/update_acme_tests.py index 7b407ca88d04..e9ff9f4e583f 100644 --- a/scripts/lib/update_acme_tests.py +++ b/scripts/lib/update_acme_tests.py @@ -48,7 +48,7 @@ "ERP.f45_g37_rx1.A", "SMS_D_Ln9.f19_g16_rx1.A", "DAE.f19_f19.A", - "PET.f19_f19.A", + "PET_P32.f19_f19.A", "SMS.T42_T42.S", "PRE.f45_g37_rx1.ADESP") ), From e8212b4c13517a8d1c89d4cd1e41c473f31377f7 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 26 Apr 2017 13:18:24 -0600 Subject: [PATCH 142/219] fix query function for --file option and iscompvar --- scripts/Tools/xmlchange | 8 ++++---- scripts/Tools/xmlquery | 14 +++++++++----- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange index a5b2ddcefa38..69a97840949e 100755 --- a/scripts/Tools/xmlchange +++ b/scripts/Tools/xmlchange @@ -93,12 +93,12 @@ def xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, force, dryrun): with Case(caseroot, read_only=False) as case: - if xmlfile: case.set_file(xmlfile) - else: - env_mach_pes = case.get_env("mach_pes") - env_mach_pes.set_components(case.get_values("COMP_CLASSES")) + + env_mach_pes = case.get_env("mach_pes") + env_mach_pes.set_components(case.get_values("COMP_CLASSES")) + if len(listofsettings): logger.debug("List of attributes to change: %s" , listofsettings) diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery index ac26bb769553..8a1931ffc789 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -138,15 +138,21 @@ def xmlquery(case, variables, subgroup=None, fileonly=False, """ results = {} comp_classes = case.get_values("COMP_CLASSES") + if xmlfile: + case.set_file(xmlfile) for var in variables: - if subgroup is not None: + if xmlfile: + groups = ['none'] + elif subgroup is not None: groups = [subgroup] else: groups = case.get_record_fields(var, "group") - if xmlfile and not groups: + + if xmlfile: + value = case.get_value(var, resolved=resolved) results['none'] = {} results['none'][var] = {} - results['none'][var]['value'] = case.get_value(var, resolved=resolved) + results['none'][var]['value'] = value else: expect(groups, " No results found for variable %s"%var) for group in groups: @@ -197,8 +203,6 @@ def _main_func(): # Initialize case ; read in all xml files from caseroot with Case(caseroot) as case: - if xmlfile: - case.set_file(xmlfile) if listall or partial_match: all_variables = sorted(case.get_record_fields(None, "varid")) if partial_match: From 9e01f712e75e1a6e38bd4fbccca3fe93adf381f3 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Wed, 26 Apr 2017 13:30:11 -0600 Subject: [PATCH 143/219] Removed Assignment Override and Fixed Some Logic *********1*********2*********3*********4*********5*********6*********7** [ Description of the changes in this Pull Request. It should be enough information for someone not following this development to understand. Lines should be wrapped at about 72 characters. 
] Test suite: `scripts_regression_tests.py --fast` and unit tests Test baseline: Test namelist changes: Test status: bit for bit Fixes #839 User interface changes?: add '+=' to namelist Code review: @billhicks, @jgfoucar --- scripts/lib/CIME/namelist.py | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index 26f70d4f810f..bdc0495231b8 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -992,7 +992,7 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): [u'2', u'3'] >>> x.set_variable_value('foo', 'bar', [u'1']) >>> x.get_variable_value('foo', 'bar') - [u'1'] + [u'1', u'3'] >>> x.set_variable_value('foo', 'bazz', [u'3']) >>> x.set_variable_value('Brack', 'baR', [u'4']) >>> x.get_variable_value('foo', 'bazz') @@ -1023,16 +1023,12 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): if minindex > tlen: self._groups[group_name][variable_name].extend(['']*(minindex-tlen-1)) - # only replace items which are in index notation - if FORTRAN_NAME_REGEX.search(original_var).group(2) is not None: - for i in range(minindex, maxindex+2*step, step): - while len(self._groups[group_name][variable_name]) < i: - self._groups[group_name][variable_name].append('') - self._groups[group_name][variable_name][i-1] = value.pop(0) - if len(value) == 0: - break - else: - self._groups[group_name][variable_name] = value + for i in range(minindex, maxindex+2*step, step): + while len(self._groups[group_name][variable_name]) < i: + self._groups[group_name][variable_name].append('') + self._groups[group_name][variable_name][i-1] = value.pop(0) + if len(value) == 0: + break def delete_variable(self, group_name, variable_name): @@ -1454,9 +1450,6 @@ def _expect_char(self, chars): Does not return anything, but raises a `_NamelistParseError` if `chars` does not contain the character at the current position. 
- The RETURN optional is used to allow for checking of consecutive - characters such as '+=' - >>> x = _NamelistParser('ab') >>> x._expect_char('a') >>> x._advance() @@ -2110,10 +2103,10 @@ def _parse_namelist_group(self): if self._groupless: if name in self._settings: dsettings = self._settings[name] + if addto: + values = self._settings[name] + values if not addto: values = merge_literal_lists(dsettings, values) - else: - values = self._settings[name] + values self._settings[name] = values else: group = self._settings[group_name] @@ -2146,10 +2139,6 @@ def parse_namelist(self): OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u'']), (u'foo2', [u'2*5', u'6'])]) >>> _NamelistParser("!blah \n foo='bar'", groupless=True).parse_namelist() OrderedDict([(u'foo', [u"'bar'"])]) - >>> _NamelistParser("foo='bar', foo='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bazz'"])]) - >>> _NamelistParser("foo='bar', foo=", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"])]) >>> _NamelistParser("foo='bar', foo(3)='bazz'", groupless=True).parse_namelist() OrderedDict([(u'foo', [u"'bar'"]), (u'foo(3)', [u"'bazz'"])]) >>> _NamelistParser("foo(2)='bar'", groupless=True).parse_namelist() @@ -2160,6 +2149,10 @@ def parse_namelist(self): OrderedDict([(u'foo', [u"'bazz'"])]) >>> _NamelistParser("foo='bar'\n foo+='bazz'", groupless=True).parse_namelist() OrderedDict([(u'foo', [u"'bar'", u"'bazz'"])]) + >>> _NamelistParser("foo='bar', foo='bazz'", groupless=True).parse_namelist() + OrderedDict([(u'foo', [u"'bazz'"])]) + >>> _NamelistParser("foo='bar', foo=", groupless=True).parse_namelist() + OrderedDict([(u'foo', [u"'bar'"])]) >>> _NamelistParser("foo='bar', 'bazz'\n foo+='ban'", groupless=True).parse_namelist() OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u"'ban'"])]) """ From 39289cc9e7653a3f566f5086707d738a79cbdf84 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 26 Apr 2017 15:00:13 -0600 Subject: [PATCH 144/219] Move tools/unit_testing to scripts/fortran_unit_testing Rationale: tools are supposed to be optional tools, whereas the Fortran unit tests are a more required part of the testing workflow. 
--- .../Examples/circle_area/src/CMakeLists.txt | 0 .../fortran_unit_testing}/Examples/circle_area/src/circle.F90 | 0 .../Examples/circle_area/tests/CMakeLists.txt | 0 .../Examples/circle_area/tests/CTest/CMakeLists.txt | 0 .../Examples/circle_area/tests/CTest/test_driver.F90 | 0 .../Examples/circle_area/tests/pFUnit/CMakeLists.txt | 0 .../Examples/circle_area/tests/pFUnit/test_circle.pf | 0 .../Examples/interpolate_1d/src/CMakeLists.txt | 0 .../Examples/interpolate_1d/src/interpolate_1d.F90 | 0 .../Examples/interpolate_1d/tests/CMakeLists.txt | 0 .../Examples/interpolate_1d/tests/CTest/CMakeLists.txt | 0 .../Examples/interpolate_1d/tests/CTest/test_driver.F90 | 0 .../Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt | 0 .../interpolate_1d/tests/pFUnit/test_interpolate_point.pf | 0 .../fortran_unit_testing}/Examples/test_list.xml | 0 {tools/unit_testing => scripts/fortran_unit_testing}/README | 0 .../fortran_unit_testing}/python/.gitignore | 0 .../fortran_unit_testing}/python/comparable.py | 0 .../fortran_unit_testing}/python/environment.py | 0 .../fortran_unit_testing}/python/machine_setup.py | 0 .../fortran_unit_testing}/python/printer.py | 0 .../fortran_unit_testing}/python/test_environment.py | 0 .../fortran_unit_testing}/python/test_xml_test_list.py | 0 .../fortran_unit_testing}/python/test_xml_utils.py | 0 .../fortran_unit_testing}/python/xml_test_list.py | 0 .../fortran_unit_testing}/python/xml_utils.py | 0 .../fortran_unit_testing}/run_tests.py | 2 +- scripts/lib/CIME/code_checker.py | 2 +- scripts/tests/scripts_regression_tests.py | 4 ++-- 29 files changed, 4 insertions(+), 4 deletions(-) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/circle_area/src/CMakeLists.txt (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/circle_area/src/circle.F90 (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/circle_area/tests/CMakeLists.txt (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/circle_area/tests/CTest/CMakeLists.txt (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/circle_area/tests/CTest/test_driver.F90 (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/circle_area/tests/pFUnit/CMakeLists.txt (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/circle_area/tests/pFUnit/test_circle.pf (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/interpolate_1d/src/CMakeLists.txt (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/interpolate_1d/src/interpolate_1d.F90 (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/interpolate_1d/tests/CMakeLists.txt (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/interpolate_1d/tests/CTest/CMakeLists.txt (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/interpolate_1d/tests/CTest/test_driver.F90 (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/Examples/test_list.xml (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/README (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/.gitignore (100%) rename {tools/unit_testing => 
scripts/fortran_unit_testing}/python/comparable.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/environment.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/machine_setup.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/printer.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/test_environment.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/test_xml_test_list.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/test_xml_utils.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/xml_test_list.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/python/xml_utils.py (100%) rename {tools/unit_testing => scripts/fortran_unit_testing}/run_tests.py (99%) diff --git a/tools/unit_testing/Examples/circle_area/src/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/circle_area/src/CMakeLists.txt similarity index 100% rename from tools/unit_testing/Examples/circle_area/src/CMakeLists.txt rename to scripts/fortran_unit_testing/Examples/circle_area/src/CMakeLists.txt diff --git a/tools/unit_testing/Examples/circle_area/src/circle.F90 b/scripts/fortran_unit_testing/Examples/circle_area/src/circle.F90 similarity index 100% rename from tools/unit_testing/Examples/circle_area/src/circle.F90 rename to scripts/fortran_unit_testing/Examples/circle_area/src/circle.F90 diff --git a/tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/circle_area/tests/CMakeLists.txt similarity index 100% rename from tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt rename to scripts/fortran_unit_testing/Examples/circle_area/tests/CMakeLists.txt diff --git a/tools/unit_testing/Examples/circle_area/tests/CTest/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/circle_area/tests/CTest/CMakeLists.txt similarity index 100% rename from tools/unit_testing/Examples/circle_area/tests/CTest/CMakeLists.txt rename to scripts/fortran_unit_testing/Examples/circle_area/tests/CTest/CMakeLists.txt diff --git a/tools/unit_testing/Examples/circle_area/tests/CTest/test_driver.F90 b/scripts/fortran_unit_testing/Examples/circle_area/tests/CTest/test_driver.F90 similarity index 100% rename from tools/unit_testing/Examples/circle_area/tests/CTest/test_driver.F90 rename to scripts/fortran_unit_testing/Examples/circle_area/tests/CTest/test_driver.F90 diff --git a/tools/unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt similarity index 100% rename from tools/unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt rename to scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt diff --git a/tools/unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf similarity index 100% rename from tools/unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf rename to scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf diff --git a/tools/unit_testing/Examples/interpolate_1d/src/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/interpolate_1d/src/CMakeLists.txt similarity index 100% rename from tools/unit_testing/Examples/interpolate_1d/src/CMakeLists.txt rename to scripts/fortran_unit_testing/Examples/interpolate_1d/src/CMakeLists.txt diff --git 
a/tools/unit_testing/Examples/interpolate_1d/src/interpolate_1d.F90 b/scripts/fortran_unit_testing/Examples/interpolate_1d/src/interpolate_1d.F90 similarity index 100% rename from tools/unit_testing/Examples/interpolate_1d/src/interpolate_1d.F90 rename to scripts/fortran_unit_testing/Examples/interpolate_1d/src/interpolate_1d.F90 diff --git a/tools/unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt similarity index 100% rename from tools/unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt rename to scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt diff --git a/tools/unit_testing/Examples/interpolate_1d/tests/CTest/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CTest/CMakeLists.txt similarity index 100% rename from tools/unit_testing/Examples/interpolate_1d/tests/CTest/CMakeLists.txt rename to scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CTest/CMakeLists.txt diff --git a/tools/unit_testing/Examples/interpolate_1d/tests/CTest/test_driver.F90 b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CTest/test_driver.F90 similarity index 100% rename from tools/unit_testing/Examples/interpolate_1d/tests/CTest/test_driver.F90 rename to scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CTest/test_driver.F90 diff --git a/tools/unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt similarity index 100% rename from tools/unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt rename to scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt diff --git a/tools/unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf similarity index 100% rename from tools/unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf rename to scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf diff --git a/tools/unit_testing/Examples/test_list.xml b/scripts/fortran_unit_testing/Examples/test_list.xml similarity index 100% rename from tools/unit_testing/Examples/test_list.xml rename to scripts/fortran_unit_testing/Examples/test_list.xml diff --git a/tools/unit_testing/README b/scripts/fortran_unit_testing/README similarity index 100% rename from tools/unit_testing/README rename to scripts/fortran_unit_testing/README diff --git a/tools/unit_testing/python/.gitignore b/scripts/fortran_unit_testing/python/.gitignore similarity index 100% rename from tools/unit_testing/python/.gitignore rename to scripts/fortran_unit_testing/python/.gitignore diff --git a/tools/unit_testing/python/comparable.py b/scripts/fortran_unit_testing/python/comparable.py similarity index 100% rename from tools/unit_testing/python/comparable.py rename to scripts/fortran_unit_testing/python/comparable.py diff --git a/tools/unit_testing/python/environment.py b/scripts/fortran_unit_testing/python/environment.py similarity index 100% rename from tools/unit_testing/python/environment.py rename to scripts/fortran_unit_testing/python/environment.py diff --git a/tools/unit_testing/python/machine_setup.py b/scripts/fortran_unit_testing/python/machine_setup.py similarity index 100% rename from tools/unit_testing/python/machine_setup.py rename to scripts/fortran_unit_testing/python/machine_setup.py diff 
--git a/tools/unit_testing/python/printer.py b/scripts/fortran_unit_testing/python/printer.py similarity index 100% rename from tools/unit_testing/python/printer.py rename to scripts/fortran_unit_testing/python/printer.py diff --git a/tools/unit_testing/python/test_environment.py b/scripts/fortran_unit_testing/python/test_environment.py similarity index 100% rename from tools/unit_testing/python/test_environment.py rename to scripts/fortran_unit_testing/python/test_environment.py diff --git a/tools/unit_testing/python/test_xml_test_list.py b/scripts/fortran_unit_testing/python/test_xml_test_list.py similarity index 100% rename from tools/unit_testing/python/test_xml_test_list.py rename to scripts/fortran_unit_testing/python/test_xml_test_list.py diff --git a/tools/unit_testing/python/test_xml_utils.py b/scripts/fortran_unit_testing/python/test_xml_utils.py similarity index 100% rename from tools/unit_testing/python/test_xml_utils.py rename to scripts/fortran_unit_testing/python/test_xml_utils.py diff --git a/tools/unit_testing/python/xml_test_list.py b/scripts/fortran_unit_testing/python/xml_test_list.py similarity index 100% rename from tools/unit_testing/python/xml_test_list.py rename to scripts/fortran_unit_testing/python/xml_test_list.py diff --git a/tools/unit_testing/python/xml_utils.py b/scripts/fortran_unit_testing/python/xml_utils.py similarity index 100% rename from tools/unit_testing/python/xml_utils.py rename to scripts/fortran_unit_testing/python/xml_utils.py diff --git a/tools/unit_testing/run_tests.py b/scripts/fortran_unit_testing/run_tests.py similarity index 99% rename from tools/unit_testing/run_tests.py rename to scripts/fortran_unit_testing/run_tests.py index 422ef15765fc..f3b0c1deeba0 100755 --- a/tools/unit_testing/run_tests.py +++ b/scripts/fortran_unit_testing/run_tests.py @@ -4,7 +4,7 @@ _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..") sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) sys.path.append(os.path.join(_CIMEROOT, "scripts", "utils", "python")) -sys.path.append(os.path.join(_CIMEROOT, "tools", "unit_testing", "python")) +sys.path.append(os.path.join(_CIMEROOT, "scripts", "fortran_unit_testing", "python")) from standard_script_setup import * from CIME.BuildTools.configure import configure diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py index 2984df3c2fde..9576e3b9c603 100644 --- a/scripts/lib/CIME/code_checker.py +++ b/scripts/lib/CIME/code_checker.py @@ -26,7 +26,7 @@ def _run_pylint(on_file, interactive): cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\"))'"%\ (os.path.join(cimeroot,"scripts","lib"), os.path.join(cimeroot,"scripts","Tools"), - os.path.join(cimeroot,"tools","unit_testing","python")) + os.path.join(cimeroot,"scripts","fortran_unit_testing","python")) cmd = "%s %s %s" % (pylint, cmd_options, on_file) logger.debug("pylint command is %s"%cmd) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index d5e99911a16e..7dac780e46d2 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -196,7 +196,7 @@ def test_a_unit_test(self): test_dir = os.path.join(cls._testroot,"unit_tester_test") cls._testdirs.append(test_dir) os.makedirs(test_dir) - unit_test_tool = os.path.abspath(os.path.join(CIME.utils.get_cime_root(),"tools","unit_testing","run_tests.py")) + unit_test_tool = 
os.path.abspath(os.path.join(CIME.utils.get_cime_root(),"scripts","fortran_unit_testing","run_tests.py")) test_spec_dir = os.path.join(os.path.dirname(unit_test_tool),"Examples", "interpolate_1d", "tests") run_cmd_no_fail("%s --build-dir %s --test-spec-dir %s"\ %(unit_test_tool,test_dir,test_spec_dir)) @@ -214,7 +214,7 @@ def test_b_cime_f90_unit_tests(self): cls._testdirs.append(test_dir) os.makedirs(test_dir) test_spec_dir = CIME.utils.get_cime_root() - unit_test_tool = os.path.abspath(os.path.join(test_spec_dir,"tools","unit_testing","run_tests.py")) + unit_test_tool = os.path.abspath(os.path.join(test_spec_dir,"scripts","fortran_unit_testing","run_tests.py")) run_cmd_no_fail("%s --build-dir %s --test-spec-dir %s"\ %(unit_test_tool,test_dir,test_spec_dir)) From 203b3b8f13411a34ac5bef043a4af423e2c81c3d Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 26 Apr 2017 15:03:27 -0600 Subject: [PATCH 145/219] Remove README.unit_testing The relevant information is now in the cime documentation --- README.unit_testing | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 README.unit_testing diff --git a/README.unit_testing b/README.unit_testing deleted file mode 100644 index 168e7ed302a2..000000000000 --- a/README.unit_testing +++ /dev/null @@ -1,15 +0,0 @@ -# To run all the CIME Fortran unit tests, run the following command: -# On yellowstone, this requires module load all-python-libs -# -# The creation of a temporary directory ensures that you are doing a completely -# clean build of the unit tests. (The use of the --clean flag to run_tests.py -# cleans most, but not all of the files created by the unit test build.) For -# rerunning the tests after an incremental change, you can instead use an -# existing build directory. -# -# We would encourage you to port these tests to other platforms. 
-# The test requires an install of pFunit available from -# https://sourceforge.net/projects/pfunit/ -# - -tools/unit_testing/run_tests.py --build-dir `mktemp -d ./unit_tests.XXXXXXXX` From 75ff37682f7a60b62af700197e8bdea3ca246e1f Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 26 Apr 2017 15:28:28 -0600 Subject: [PATCH 146/219] Clean up python in fortran_unit_testing directory - Remove dead code (which has been replaced by use of the standard CIME utilities) - Clean up pylint problems --- .../fortran_unit_testing/python/comparable.py | 29 -- .../python/environment.py | 263 ---------------- .../python/machine_setup.py | 296 ------------------ .../python/test_environment.py | 237 -------------- .../python/test_xml_test_list.py | 5 +- .../python/test_xml_utils.py | 294 ----------------- .../python/xml_test_list.py | 5 +- .../fortran_unit_testing/python/xml_utils.py | 202 ------------ scripts/fortran_unit_testing/run_tests.py | 1 - 9 files changed, 5 insertions(+), 1327 deletions(-) delete mode 100644 scripts/fortran_unit_testing/python/comparable.py delete mode 100644 scripts/fortran_unit_testing/python/environment.py delete mode 100644 scripts/fortran_unit_testing/python/machine_setup.py delete mode 100755 scripts/fortran_unit_testing/python/test_environment.py delete mode 100755 scripts/fortran_unit_testing/python/test_xml_utils.py delete mode 100644 scripts/fortran_unit_testing/python/xml_utils.py diff --git a/scripts/fortran_unit_testing/python/comparable.py b/scripts/fortran_unit_testing/python/comparable.py deleted file mode 100644 index 20e52697dba7..000000000000 --- a/scripts/fortran_unit_testing/python/comparable.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Portable implementation of comparisons for total ordering. - -THIS MODULE IS OBSOLETE AS OF PYTHON 2.7. - -This module provides a "Comparable" class for subclassing; it provides the -various "rich comparison" functions (e.g. __le__) based on the subclass's -__eq__ and __lt__ methods (so this is a Template Method pattern). - -Python 2.7 and Python 3 implement a class decorator for this in the -standard libraries (functools.total_ordering), and that decorator is much -more flexible (besides being standard), so this should only be used for -compatibility with older implementations. -""" - -class Comparable(object): - - """Provides rich comparisons for subclasses that have __eq__ and __lt__.""" - - def __le__(self, other): - return (self == other) and (self < other) - - def __gt__(self, other): - return not (self <= other) - - def __ge__(self, other): - return not (self < other) - - def __ne__(self, other): - return not (self == other) diff --git a/scripts/fortran_unit_testing/python/environment.py b/scripts/fortran_unit_testing/python/environment.py deleted file mode 100644 index 62972c6ad574..000000000000 --- a/scripts/fortran_unit_testing/python/environment.py +++ /dev/null @@ -1,263 +0,0 @@ -"""Produce commands for environment module systems. - -Public classes: -EnvSystemInterface - Abstract base class for interfaces. -NoModuleInterface - Can be constructed, but raises exception if used. -ModuleInterface - For standard module systems (including Lmod). -SoftEnvInterface - For SoftEnv, the ANL MCS environment system. - -Public routines: -expand_env - Expand shell variables (like "${FOO}"). 
-""" - -import os -import re -import subprocess - -__all__ = ("EnvSystemInterface", "NoModuleInterface", "ModuleInterface", - "SoftEnvInterface", "expand_env") - -class EnvSystemInterface(object): - - """Abstract base class for environment system interfaces. - - This class exists only to document the interface of subclasses, and to - provide error messages if an unimplemented function is called by - accident. - - Normally, none of these methods should actually be called. All of them - raise NotImplementedError. - - Public methods: - is_loaded - purge - load - unload - purge_str - load_str - unload_str - list - """ - - @classmethod - def _raise_not_implemented(cls, method_name): - raise NotImplementedError( - cls.__name__ +" does not implement method "+method_name+".") - - def is_loaded(self, modname): - """Method not implemented.""" - self._raise_not_implemented("is_loaded") - - def purge(self): - """Method not implemented.""" - self._raise_not_implemented("purge") - - def list(self): - """Method not implemented.""" - self._raise_not_implemented("list") - - def load(self, modname): - """Method not implemented.""" - self._raise_not_implemented("load") - - def unload(self, modname): - """Method not implemented.""" - self._raise_not_implemented("unload") - - def purge_str(self): - """Method not implemented.""" - self._raise_not_implemented("purge_str") - - def load_str(self, modname): - """Method not implemented.""" - self._raise_not_implemented("load_str") - - def unload_str(self, modname): - """Method not implemented.""" - self._raise_not_implemented("unload_str") - - -class NoModuleInterface(EnvSystemInterface): - - """Module interface for systems with no module system. - - The purpose of this class is to allow code to construct a module - interface regardless of system, while still raising an error if the - user attempts to actually interact with the non-existent interface. - - As a result, only purge_str is implemented. - - Public methods: - purge_str - Returns null command. - """ - - def purge_str(self): - """Returns ":", the null shell command.""" - return ":" - - -class ModuleInterface(EnvSystemInterface): - - """Module interface for systems with a typical module system. - - Class methods: - python_init - Initialize python interface. - - Public methods: - is_loaded - Tests if a module is currently loaded. - purge - Purge all modules from environment. - load - Load a module. - unload - Unload a module. - list - list currently loaded modules. - purge_str - Return command to purge modules. - load_str - Return command to load the given module. - unload_str - Return command to unload the given module. - """ - - # Singleton declaring whether or not we have the module interface - # started up yet. - _python_initialized = False - - # Magic string to grep for in "module list" output to determine whether - # or not a module is present. - _not_loaded_string = "None found" - - @classmethod - def python_init(cls, filename): - """Initialize python to module system interface. - - This must be called before any commands that are intended to - modify python's environment. The *_str methods can still be called - without this. Only the first call has any effect. - - Arguments: - filename - Python file to execute to load the interface. 
- """ - if not cls._python_initialized: - execfile(filename, globals()) - cls._python_initialized = True - - def is_loaded(self, modname): - """"Whether the module is loaded in the current environment.""" - # This assertion message isn't quite true, but this behavior is - # convenient for testing purposes, which is why is_loaded exists. - assert self._python_initialized, \ - "Can't test modules without initializing the python interface!" - process = subprocess.Popen("module list "+modname, - shell=True, - env=os.environ, - stderr=subprocess.PIPE) - stderr_output = process.communicate()[1] - # Handle unexpected error in subprocess. - if process.returncode not in (0,1): - raise Exception("module list command failed with code: "+ - str(process.returncode)) - # Hack! Assume that process.returncode == 1 means that no modules - # are loaded, rather than some other possible error. - return process.returncode != 1 and \ - re.search(self._not_loaded_string, stderr_output) is None - - def purge(self): - """"Purge all modules from the current environment.""" - assert self._python_initialized, \ - "Can't purge modules without initializing the python interface!" - module("purge") - - def list(self): - """"List all modules from the current environment.""" - assert self._python_initialized, \ - "Can't list modules without initializing the python interface!" - module("list") - - def load(self, modname): - """"Load a module in the current environment.""" - assert self._python_initialized, \ - "Can't load modules without initializing the python interface!" - module("load", modname) - - def unload(self, modname): - """"Unload a module from the current environment.""" - assert self._python_initialized, \ - "Can't unload modules without initializing the python interface!" - module("unload", modname) - - def purge_str(self): - """Returns the shell command to purge modules as a string.""" - return "module purge" - - def load_str(self, modname): - """Returns the shell command to load the module as a string.""" - return "module load "+modname - - def unload_str(self, modname): - """Returns the shell command to unload the module as a string.""" - return "module unload "+modname - - -class SoftEnvInterface(EnvSystemInterface): - - """Module interface for systems with SoftEnv. - - Public methods: - purge_str - Return command to reset SoftEnv. - load_str - Return command to add the given keyword/macro. - unload_str - Return command to delete the given keyword/macro. - """ - - def purge_str(self): - """Returns the shell command to reset SoftEnv as a string. - - Unfortunately, there's not a straightforward way to clear - everything, so right now we just run "resoft". If it becomes - necessary in the future, this may change to do something else (e.g. - resoft using a custom file with only default settings). - """ - return "resoft" - - def load_str(self, modname): - """Returns the shell command to add the keyword as a string.""" - return "soft add "+modname - - def unload_str(self, modname): - """Returns the shell command to delete the keyword as a string.""" - return "soft delete "+modname - - -# Regex that matches an environment variable reference, putting the name in -# the "name" group of the match. -_env_re = re.compile( - """\$ # Initial "$" - (?P\{)? # Open brace if present - (?P[A-Za-z0-9_]+) # Variable name - (?(brace)\}) # Close brace if necessary""", re.X) - - -def expand_env(string, env): - """Expand a shell string with given environment. - - Arguments: - string - String to expand as if in a shell. 
- env - dict specifying environment variables. - - This is very limited; right now it only handles variable substitution, - and only with "${FOO}" or "$FOO" style syntax. The variable name must - contain only alphanumeric characters and "_". There is no way to escape - a "$". - - Expansion is recursive; the output of this function is its fixed point. - """ - def expand_func(match): - """Given an environment variable match, return replacement text.""" - var_name = match.group("name") - if var_name in env: - return env[var_name] - else: - # If there's no match in the environment, return original text. - return match.group(0) - old_string = "" - new_string = string - while new_string != old_string: - old_string = new_string - new_string = _env_re.sub(expand_func, new_string) - - return new_string diff --git a/scripts/fortran_unit_testing/python/machine_setup.py b/scripts/fortran_unit_testing/python/machine_setup.py deleted file mode 100644 index 1059beca258c..000000000000 --- a/scripts/fortran_unit_testing/python/machine_setup.py +++ /dev/null @@ -1,296 +0,0 @@ -"""Functions to set up machine-specific settings. - -Currently, this module is not under unit test, because it interacts too -directly with the OS of the machine it is on. Any changes to address this -would be welcome (this would likely require the use of mock modules, -and/or a specific set of machines to test on). - -Public classes: -MachineCompilerSettings - Set up machine/compiler specific environment. -""" - -# Python 3 compatible printing in Python 2. -from __future__ import print_function - -from itertools import chain -from os import environ -import os.path -import platform -import re -import subprocess -from xml.etree.ElementTree import ElementTree - -import environment as ENV -from printer import ScriptPrinter -from xml_utils import best_match, all_matches -__all__ = ("MachineCompilerSettings") - - -def get_machine_name(): - """Returns a canonical version of the machine name.""" - name = platform.node() - # Strip everything after the first ".", and whitespace. - name = name.split(".")[0].strip() - if re.match("^yslogin[0-9]+", name): - name = "yellowstone" - elif re.match("^miralac[0-9]+", name): - name = "mira" - elif re.match("^cetuslac[0-9]+", name): - name = "mira" - elif re.match("^caldera.*", name) or re.match("^pronghorn.*", name): - # Use yellowstone settings for caldera/pronghorn, since the mahcines - # files aren't set up explicitly for those machines, and they have the - # same configuration as yellowstone. - name = "yellowstone" - return name - -def load_machine_env(compiler): - """Add machine environment variables not in config_compilers.xml - - Besides simply setting variables, this may also load some modules. - """ - - mach = get_machine_name() - - if mach == "yellowstone": - mod = ENV.ModuleInterface() - mod.python_init("/glade/apps/opt/lmod/lmod/init/env_modules_python.py") - mod.purge() - mod.load("ncarenv/1.0") - mod.load("ncarbinlibs/1.0") - if compiler == "intel": - mod.load("intel/15.0.1") - mod.load("mkl/11.1.2") - elif compiler == "pgi": - mod.load("pgi/13.9") - mod.load("ncarcompilers/1.0") - mod.load("cmake/2.8.10.2") - mod.load("netcdf/4.3.0") - - -class MachineCompilerSettings(object): - - """Encapsulate machine settings and set environment from them. - - Public methods: - __init__ - Discover information about local machine. - compiler_xml_to_env - Set a specific variable using config_compilers. - set_compiler_env - Set up machine-specific environment. 
- write_cmake_macros - Create CMake "Macros" file. - """ - - def __init__(self, compiler, compiler_xml_path, - machine=None, - use_env_compiler=False, - mpilib=None, use_openmp=False): - """Discover information about the machine and compiler. - - Arguments: - compiler - String naming the compiler vendor. - compiler_xml_path - Path to config_compilers.xml file. - machine - String naming the machine (guessed if not provided) - use_env_compiler - Force use of environment variable FC instead of - using the compiler in config_compilers.xml. - mpilib - String naming the mpi library if any. - use_openmp - Boolean option to use OpenMP compiler flags. - Currently only works for CESM/CESM_DEBUG builds. - """ - if machine is None: - machine = get_machine_name() - self.machine_dict = { - "COMPILER": compiler, - "MACH": machine, - "OS": platform.system(), - "compile_threaded": "true" if use_openmp else "false", - } - if mpilib: - self.machine_dict["MPILIB"] = mpilib - else: - self.machine_dict["MPILIB"] = 'mpi-serial' - - self.compiler_xml_tree = ElementTree() - self.compiler_xml_tree.parse(compiler_xml_path) - self.use_env_compiler = use_env_compiler - self.mpilib = mpilib - - def compiler_xml_to_env(self, xml_path, var_name): - """Look up a config_compilers entry and set a variable from it. - - Arguments: - xml_path - Path within the xml file (e.g. "compiler/SFC"). - var_name - Name of environment variable (e.g. "FC"). - """ - match = best_match(self.compiler_xml_tree, xml_path, - self.machine_dict) - assert match is not None, "Could not determine "+var_name+ \ - " from compiler/machines xml file." - environ[var_name] = match.text.strip() - - def add_path(self, name, macros_printer): - match = best_match(self.compiler_xml_tree, "compiler/"+name+"_PATH", - self.machine_dict) - if match is not None: - macros_printer.print_header(name + " location.") - libpath = match.text - _make_env_re = re.compile( - """\$\( # Initial "$" and brace - (?P[A-Za-z0-9_]+) # Variable name - \) # Close brace""", re.X) - libpath = _make_env_re.sub( "$ENV{\g}",libpath) - print("libpath = "+libpath) - macros_printer.print( - "list(APPEND CMAKE_PREFIX_PATH "+libpath+")" - ) - - def set_compiler_env(self): - """Set up the environment for this machine and compiler.""" - if not self.use_env_compiler: - if (self.mpilib == "mpi-serial" or self.mpilib is None): - self.compiler_xml_to_env("compiler/SFC", "FC") - self.compiler_xml_to_env("compiler/SCC", "CC") - else: - self.compiler_xml_to_env("compiler/MPIFC", "FC") - self.compiler_xml_to_env("compiler/MPICC", "CC") - load_machine_env(self.machine_dict["COMPILER"]) - - def write_cmake_macros(self, macros_file, model="CESM"): - """Write CMake macros file using config_compilers.xml - - Arguments: - macros_file - File object to write to. - """ - - # Print header to file. 
- macros_printer = ScriptPrinter(macros_file) - header_lines = [model+ - " build flags for:", - " Compiler = "+self.machine_dict["COMPILER"], - " Machine = "+self.machine_dict["MACH"], - " OS = "+self.machine_dict["OS"], - " MPILIB = "+self.machine_dict["MPILIB"], - ] - - for line in header_lines: - macros_printer.comment(line) - - macros_printer.print("include(Compilers)") - macros_printer.print( - "set(CMAKE_Fortran_FLAGS_"+model+" \"\" CACHE STRING \"Flags used by the Fortran compiler during builds.\" FORCE)") - macros_printer.print( - "set(CMAKE_Fortran_FLAGS_"+model+"_DEBUG \"\" CACHE STRING \"Flags used by the Fortran compiler during DEBUG builds.\" FORCE)") - - - macros_printer.print( - "set(CMAKE_C_FLAGS_"+model+" \"\" CACHE STRING \"Flags used by the C compiler during builds.\" FORCE)") - macros_printer.print( - "set(CMAKE_C_FLAGS_"+model+"_DEBUG \"\" CACHE STRING \"Flags used by the C compiler during DEBUG builds.\" FORCE)") - - macros_printer.print( - "mark_as_advanced(CMAKE_Fortran_FLAGS_"+model+" CMAKE_Fortran_FLAGS_" - +model+"_DEBUG)") - macros_printer.print( - "mark_as_advanced(CMAKE_C_FLAGS_"+model+" CMAKE_C_FLAGS_"+model+"_DEBUG)") - macros_printer.print( - "set(all_build_types \"None Debug Release RelWithDebInfo MinSizeRel "+model+ - " "+model+"_DEBUG\")") - macros_printer.print( - "set(CMAKE_BUILD_TYPE \"${CMAKE_BUILD_TYPE}\" CACHE STRING \"Choose the type of build, options are: ${all_build_types}.\" FORCE)") - - - - - # pFUnit location if it exists. - self.add_path("PFUNIT",macros_printer) - # NETCDF location if it exists. - self.add_path("NETCDF",macros_printer) - # PNETCDF location if it exists. - self.add_path("PNETCDF", macros_printer) - # HDF5 location if it exists. - self.add_path("HDF5", macros_printer) - # MPI location if it exists. - self.add_path("MPI", macros_printer) - # PETSc location if it exists. - self.add_path("PETSC", macros_printer) - # TRILINOS location if it exists. - self.add_path("TRILINOS", macros_printer) - # ALBANY location if it exists. - self.add_path("ALBANY", macros_printer) - - - # Normal and debug dictionaries for looking things up in - # config_compilers. - normal_dict = self.machine_dict.copy() - normal_dict["DEBUG"] = "FALSE" - - debug_dict = self.machine_dict.copy() - debug_dict["DEBUG"] = "TRUE" - - def add_formatted_flags(flags_name, format): - """Print CMake flags using macros_printer. - - Arguments: - flags_name - Name to search for in config_compilers. - format - Function that takes a build type and flag match, and - returns the string to print out. - """ - - paths = ["compiler/"+flags_name, "compiler/ADD_"+flags_name] - - # This creates an iterable over elements in config_compilers - # that match in non-debug mode. - normal_matches = chain.from_iterable( - all_matches(self.compiler_xml_tree, path, normal_dict) - for path in paths - ) - for match in normal_matches: - macros_printer.print(format(model, match.text)) - - # Now the same for debug mode. - debug_matches = chain.from_iterable( - all_matches(self.compiler_xml_tree, path, debug_dict) - for path in paths - ) - for match in debug_matches: - macros_printer.print(format(model+"_DEBUG", match.text)) - - # Below, we just use a bunch of lambda functions to describe how - # the build type and a matching element (e.g. an FFLAGS entry) are - # turned into a CMake function call. 
- - macros_printer.print_header("CPP definitions.") - add_formatted_flags( - "CPPDEFS", - lambda b, m: "add_config_definitions("+b+" "+m+")" - ) - def format_contiguous(build_type, match): - comma = "," if self.machine_dict["COMPILER"] != "ibm" else "\\\," - contig_def = "contiguous"+comma if match == "TRUE" else "" - return "add_config_definitions("+build_type+\ - " -DUSE_CONTIGUOUS="+contig_def+")" - add_formatted_flags( - "HAS_F2008_CONTIGUOUS", - format_contiguous - ) - - macros_printer.print_header("Fortran flags.") - add_formatted_flags( - "FFLAGS", - lambda b, m: "add_flags(CMAKE_Fortran_FLAGS_"+b+" "+m+")" - ) - - macros_printer.print_header("C flags.") - add_formatted_flags( - "CFLAGS", - lambda b, m: "add_flags(CMAKE_C_FLAGS_"+b+" "+m+")" - ) - - macros_printer.print_header("Linker flags.") - add_formatted_flags( - "LDFLAGS", - lambda b, m: "add_flags(CMAKE_EXE_LINKER_FLAGS_"+b+" "+m+")" - ) - macros_printer.print_header("External library flags.") - add_formatted_flags( - "SLIBS", - lambda b, m: "add_flags(CMAKE_EXE_LINKER_FLAGS_"+b+" "+m+")" - ) diff --git a/scripts/fortran_unit_testing/python/test_environment.py b/scripts/fortran_unit_testing/python/test_environment.py deleted file mode 100755 index a7cbaa731024..000000000000 --- a/scripts/fortran_unit_testing/python/test_environment.py +++ /dev/null @@ -1,237 +0,0 @@ -#!/usr/bin/env python -"""Unit tests for the environment module. - -Public classes: -TestEnvSys - Test the EnvSystemInterface abstract base class. -TestNoMod - NoModuleInterface tests. -TestMod - ModuleInterface tests. -TestSoft - SoftEnvInterface tests. -TestExpandEnv - expand_env tests. - -To test the actual module system, you must specify these environment -variables when running the test: -MODULE_SYSTEM - Type of module system available locally. -TEST_MODULE - Name of a module which is *not* loaded, to use for testing - purposes. -MODULE_FILE - File that must be executed to load the interface to python - (if any). -""" - -from os import environ -import unittest - -from environment import * - -__all__ = ("TestEnvSys", "TestNoMod", "TestMod", "TestSoft", - "TestExpandEnv") - -# Test data gleaned from the environment. -module_system = "none" -test_module = "foo" -module_file = None -if "MODULE_SYSTEM" in environ: - module_system = environ["MODULE_SYSTEM"] - assert "TEST_MODULE" in environ, \ - "MODULE_SYSTEM is set, but TEST_MODULE is not defined." - test_module = environ["TEST_MODULE"] - if module_system == "module": - assert "MODULE_FILE" in environ, \ - "MODULE_SYSTEM is module, but MODULE_FILE is not defined." - module_file = environ["MODULE_FILE"] - - -class TestEnvSys(unittest.TestCase): - - """Tests for the EnvSystemInterface class. - - This tests that each method is NotImplemented. Other test classes in - this module inherit from this one in order to detect when a method has - been implemented but no test is defined for the new method. - - However, figuring out which class causes the error is a bit of a - guessing game. Addressing that issue is difficult in Python 2.6 because - of how assertRaises works. 
- """ - - test_class = EnvSystemInterface - - def setUp(self): - """Set up by creating an instance of the class under test.""" - self.test_obj = self.test_class() - - def test_is_loaded(self): - """is_loaded raises NotImplementedError.""" - self.assertRaises(NotImplementedError, - self.test_obj.is_loaded, test_module) - - def test_purge(self): - """purge raises NotImplementedError.""" - self.assertRaises(NotImplementedError, - self.test_obj.purge) - - def test_load(self): - """load raises NotImplementedError.""" - self.assertRaises(NotImplementedError, - self.test_obj.load, test_module) - - def test_purge_str(self): - """purge_str raises NotImplementedError.""" - self.assertRaises(NotImplementedError, - self.test_obj.purge_str) - - def test_load_str(self): - """load_str raises NotImplementedError.""" - self.assertRaises(NotImplementedError, - self.test_obj.load_str, test_module) - - def test_unload_str(self): - """unload_str raises NotImplementedError.""" - self.assertRaises(NotImplementedError, - self.test_obj.unload_str, test_module) - - -class TestNoMod(TestEnvSys): - - """Tests for the NoModuleInterface class.""" - - test_class = NoModuleInterface - - def test_purge_str(self): - """User can call NoModuleInterface.purge_str and get a ":".""" - self.assertEqual(self.test_obj.purge_str(), ":") - - -class TestMod(TestEnvSys): - - """Tests for the ModuleInterface class. - - It's not easy to verify that this behaves correctly, so in many cases - we do some kind of check for self consistency only. If module_system - is not "module", then most of the tests look for an exception. - """ - - test_class = ModuleInterface - - def setUp(self): - """Set up module system if present, then call parent setUp.""" - if module_system == "module": - ModuleInterface.python_init(module_file) - super(TestMod, self).setUp() - - def tearDown(self): - """Remove the test module from the current environment.""" - if module_system == "module": - self.test_obj.unload(test_module) - - def test_is_loaded(self): - """ModuleInterface.is_loaded returns false for unloaded module.""" - if module_system == "module": - self.assertFalse(self.test_obj.is_loaded(test_module)) - else: - self.assertRaises(AssertionError, - self.test_obj.is_loaded, test_module) - - def test_purge(self): - """ModuleInterface.purge removes the test module.""" - if module_system == "module": - self.test_obj.load(test_module) - self.test_obj.purge() - self.assertFalse(self.test_obj.is_loaded(test_module)) - else: - self.assertRaises(AssertionError, - self.test_obj.purge) - - def test_load(self): - """ModuleInterface.load loads a module.""" - if module_system == "module": - self.test_obj.load(test_module) - self.assertTrue(self.test_obj.is_loaded(test_module)) - else: - self.assertRaises(AssertionError, - self.test_obj.load, test_module) - - def test_unload(self): - """ModuleInterface.unload unloads a module.""" - if module_system == "module": - self.test_obj.load(test_module) - self.test_obj.unload(test_module) - self.assertFalse(self.test_obj.is_loaded(test_module)) - else: - self.assertRaises(AssertionError, - self.test_obj.unload, test_module) - - def test_purge_str(self): - """User gets non-null string from ModuleInterface.purge_str.""" - self.assertNotEqual(self.test_obj.purge_str().strip(), - "") - - def test_load_str(self): - """User gets non-null string from ModuleInterface.load_str.""" - self.assertNotEqual(self.test_obj.load_str(test_module).strip(), - "") - - def test_unload_str(self): - """User gets non-null string from 
ModuleInterface.unload_str.""" - self.assertNotEqual(self.test_obj.unload_str(test_module).strip(), - "") - - -class TestSoft(TestEnvSys): - - """Tests for the SoftEnvInterface class. - - It's not easy to verify that this behaves correctly without a specific - test machine in mind. As with the module tests, we really just verify - that no exceptions are raised and the return isn't the null string. - """ - - test_class = SoftEnvInterface - - def test_purge_str(self): - """User gets non-null string from SoftEnvInterface.purge_str.""" - self.assertNotEqual(self.test_obj.purge_str().strip(), "") - - def test_load_str(self): - """User gets non-null string from SoftEnvInterface.load_str.""" - self.assertNotEqual(self.test_obj.load_str(test_module).strip(), - "") - - def test_unload_str(self): - """User gets non-null string from SoftEnvInterface.unload_str.""" - self.assertNotEqual(self.test_obj.unload_str(test_module).strip(), - "") - - -class TestExpandEnv(unittest.TestCase): - - """Tests for the expand_env function.""" - - def test_no_variable(self): - """With no variables, expand_env is a no-op.""" - self.assertEqual(expand_env("foo", {"UNUSED": "not used"}), "foo") - - def test_variable_missing(self): - """With variables not present, expand_env is a no-op.""" - self.assertEqual(expand_env("${NOT_HERE}", {}), "${NOT_HERE}") - - def test_brace_expansion(self): - """Test that an expansion works with curly braces.""" - self.assertEqual(expand_env("${FOO}", {"FOO": "bar"}), "bar") - - def test_bare_expansion(self): - """Test that an expansion works with no braces.""" - self.assertEqual(expand_env("$FOO", {"FOO": "bar"}), "bar") - - def test_recursive_expansion(self): - """Test that expansion is done recursively.""" - self.assertEqual(expand_env("${FOO}", - {"FOO": "${FOO2}", "FOO2": "bar"}), - "bar") - - def test_brace_closing(self): - """Test that braces must be closed for expansion to occur.""" - self.assertEqual(expand_env("${FOO", {"FOO": "bar"}), "${FOO") - - -if __name__ == "__main__": - unittest.main() diff --git a/scripts/fortran_unit_testing/python/test_xml_test_list.py b/scripts/fortran_unit_testing/python/test_xml_test_list.py index 2aecd4d83987..565f12d89f6a 100755 --- a/scripts/fortran_unit_testing/python/test_xml_test_list.py +++ b/scripts/fortran_unit_testing/python/test_xml_test_list.py @@ -58,7 +58,7 @@ class TestSuitesFromXML(unittest.TestCase): """Tests for the suites_from_xml function.""" def check_spec_list(self, xml_str, names, directories, - known_paths={}, labels=None): + known_paths=None, labels=None): """Check that a spec list matches input names and directories. 
This is used by the following tests to do the dirty work of making @@ -129,7 +129,6 @@ def test_multiple_suites(self): def test_path_relative_to_known(self): """suites_from_xml handles a relative_to directory attribute.""" - from os.path import abspath xml_str = """ @@ -145,7 +144,6 @@ def test_path_relative_to_known(self): def test_path_with_whitespace(self): """suites_from_xml handles a directory with whitespace added.""" - from os.path import abspath xml_str = """ @@ -160,7 +158,6 @@ def test_path_with_whitespace(self): def test_path_with_label(self): """suites_from_xml handles a directory with a label correctly.""" - from os.path import abspath xml_str = """ diff --git a/scripts/fortran_unit_testing/python/test_xml_utils.py b/scripts/fortran_unit_testing/python/test_xml_utils.py deleted file mode 100755 index f05a6bf72f1a..000000000000 --- a/scripts/fortran_unit_testing/python/test_xml_utils.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env python -"""Unit tests for the xml_utils module. - -Public classes: -TestBestMatch - best_match tests. -TestAllMatches - all_matches tests. -TestElementsToDict - elements_to_dict tests. -""" - -import unittest -from xml.etree.ElementTree import XML, ElementTree - -from xml_utils import best_match, all_matches, elements_to_dict - -__all__ = ("TestBestMatch", "TestAllMatches", "TestElementsToDict") - -class TestBestMatch(unittest.TestCase): - - """Tests for the best_match function.""" - - def setUp(self): - """Create an ElementTree object as test data. - - This is a complex set of data, but the tests on it are - comparatively simple. Some of the data (such as the empty "foo" - tags) are present purely to prove that they are never matched. - """ - - root = XML(""" - - - a - a - - - generic - generic - generic - - - b - b - - - ab - - - - - - - abar - - - bbar - - -""") - - self.xml_tree = ElementTree(root) - - def test_no_match(self): - """best_match returns None when no paths match.""" - - self.assertTrue(best_match(self.xml_tree, "invalid") - is None) - - def test_simple_match(self): - """best_match can find the only match, when no attributes are used.""" - - self.assertEqual( - "generic", - best_match(self.xml_tree, "foo/data1").text - ) - - def test_no_match_attr(self): - """best_match returns None when there are no attribute matches.""" - - self.assertTrue(best_match(self.xml_tree, "bar/data1") - is None) - self.assertTrue(best_match(self.xml_tree, "bar/data1", {"a": "1"}) - is None) - - def test_match_with_attr(self): - """best_match returns the only path match, with matching attribute.""" - - self.assertEqual( - "abar", - best_match(self.xml_tree, "bar/data1", {"a": "2"}).text - ) - - def test_match_on_attr(self): - """best_match returns the only path and attribute match.""" - - self.assertEqual( - "b", - best_match(self.xml_tree, "foo/data2", - {"a": "2", "b": "2"}).text - ) - - def test_match_most_specific(self): - """best_match returns the most specific match for each path.""" - - self.assertEqual( - "generic", - best_match(self.xml_tree, "foo/data1", - {"a": "1", "b": "2"}).text - ) - self.assertEqual( - "ab", - best_match(self.xml_tree, "foo/data3", - {"a": "1", "b": "2"}).text - ) - - def test_match_first(self): - """best_match returns the first matching entry.""" - - self.assertEqual( - "abar", - best_match(self.xml_tree, "bar/data1", - {"a": "2", "b": "1"}).text - ) - - -class TestAllMatches(unittest.TestCase): - - """Tests for the all_matches function.""" - - def setUp(self): - """Create an ElementTree for use as test data.""" - - root 
= XML(""" - - - the_text - - - first_text - second_text - - - - - first_text - second_text - second_text - - - first_text - second_text - third_text - fourth_text - bad_text - - - the_text - - -""") - - self.xml_tree = ElementTree(root) - - def test_one_match(self): - """all_matches returns one element successfully.""" - self.assertEqual( - [e.text for e in all_matches(self.xml_tree, "test1/data")], - ["the_text"], - ) - - def test_multiple_matches(self): - """all_matches returns many elements successfully. - - Elements must be returned in the right order as well. - """ - self.assertEqual( - [e.text for e in all_matches(self.xml_tree, "test2/data")], - ["first_text", "second_text"], - ) - - def test_no_matches(self): - """all_matches returns no elements if none are found.""" - self.assertEqual( - [e.text for e in all_matches(self.xml_tree, "test3/data")], - [], - ) - - def test_no_attr_matches(self): - """all_matches returns no elements if none match attributes.""" - self.assertEqual( - [e.text for e in all_matches(self.xml_tree, "test4/data", - {"valid": "true"})], - [], - ) - - def test_attr_matches(self): - """all_matches returns elements that match attributes.""" - self.assertEqual( - [e.text for e in - all_matches(self.xml_tree, "test5/data", - {"valid": "true", "invalid": "false"})], - ["first_text", "second_text", "third_text", "fourth_text"], - ) - - def test_ignore_attribute(self): - """all_matches can ignore "extra" attributes.""" - self.assertEqual( - [e.text for e in - all_matches(self.xml_tree, "test6/data", - ignore=["ignore"])], - ["the_text"] - ) - - -# In true TDD form, these tests are far longer than the code they apply to! -# Of course, when they were written, the first version of the code was -# longer. -class TestElementsToDict(unittest.TestCase): - - """Tests for the elements_to_dict function.""" - - @staticmethod - def string_to_elements(xml_str, ignore="key"): - """Converts an XML string to a list of its "ENV" elements.""" - return all_matches(ElementTree(XML(xml_str)), "ENV", - ignore=ignore) - - def test_no_elements(self): - """elements_to_dict with no elements produces an empty dict.""" - self.assertEqual(elements_to_dict([]), {}) - - def test_one_element(self): - """elements_to_dict can handle one element.""" - elements = self.string_to_elements(""" - - bar - -""") - self.assertEqual(elements_to_dict(elements), {"foo": "bar"}) - - def test_multiple_elements(self): - """elements_to_dict can handle multiple elements.""" - elements = self.string_to_elements(""" - - bar1 - bar2 - -""") - self.assertEqual(elements_to_dict(elements), - {"foo1": "bar1", "foo2": "bar2"}) - - def test_key_attr(self): - """elements_to_dict uses key_attr as key attribute.""" - key_attr = "name" - xml_str = """ - - bar - -""" - elements = self.string_to_elements(xml_str, ignore=key_attr) - self.assertEqual(elements_to_dict(elements, key_attr=key_attr), - {"foo": "bar"}) - - def test_multiple_key(self): - """elements_to_dict lets later values overwrite earlier ones. - - This might not actually be the best behavior, but it's the easiest - to implement; this test is really just to prevent *accidentally* - changing the design. - """ - elements = self.string_to_elements(""" - - bar1 - bar2 - -""") - self.assertEqual(elements_to_dict(elements), - {"foo": "bar2"}) - - def test_keyless_match(self): - """elements_to_dict with a match with no key returns empty dict. - - It might be better to raise an exception; this is a robustness and - simplicity vs. correctness tradeoff. 
- """ - elements = self.string_to_elements(""" - - bar - -""") - self.assertEqual(elements_to_dict(elements), {}) - - -if __name__ == "__main__": - unittest.main() diff --git a/scripts/fortran_unit_testing/python/xml_test_list.py b/scripts/fortran_unit_testing/python/xml_test_list.py index 9e7007f93e87..ec0cf68796e4 100644 --- a/scripts/fortran_unit_testing/python/xml_test_list.py +++ b/scripts/fortran_unit_testing/python/xml_test_list.py @@ -58,7 +58,7 @@ def __iter__(self): """ return ( (l, d) for l, d in zip(self.labels, self.directories) ) -def suites_from_xml(xml_tree, known_paths={}): +def suites_from_xml(xml_tree, known_paths=None): """Generate test suite descriptions from XML. Returns a TestSuiteSpec for each suite description in the XML input. @@ -77,6 +77,9 @@ def suites_from_xml(xml_tree, known_paths={}): the known_paths dict. """ + if known_paths is None: + known_paths = {} + elements = xml_tree.findall("suite") for elem in elements: diff --git a/scripts/fortran_unit_testing/python/xml_utils.py b/scripts/fortran_unit_testing/python/xml_utils.py deleted file mode 100644 index 4089c669afee..000000000000 --- a/scripts/fortran_unit_testing/python/xml_utils.py +++ /dev/null @@ -1,202 +0,0 @@ -"""XML search utilities. - -These are based on the standard library xml.etree.ElementTree module. -However, that module is not explicitly imported at this time, so it is in -principle possible to pass arguments that imitate the interface of that -module. - -Exported functions: -best_match - Search for a specific element in an XML tree. -all_matches - Search for all matching elements in an XML tree. -elements_to_dict - Create a dict from XML entries with a key attribute. -""" - -from comparable import Comparable - -__all__ = ("best_match", "all_matches", "elements_to_dict") - -class ElementMatch(Comparable): - - """Class to aid searching for/matching an xml element. - - Public methods: - __init__ - Create an ElementMatch. - __iadd__ - Add in "quality" of other match. - __eq__ - Test if quality is same as another match. - __lt__ - Compare quality to another match. - __nonzero__/__bool__ - Test for a valid match. - - Public data: - element - Element for the match (None if no match). Read-only. - - Note that using __eq__ to test quality interferes with hashable - collections, so for now, __hash__ = None. - """ - - def __init__(self, element=None, quality=0): - """Define a element match. - - Arguments: - element - The element that was found. - quality - Abstract measure of "quality" (specificity) of the match. - - With both options left out, this will return a null ElementMatch. 
- """ - self._element = element - self._quality = quality - - def __iadd__(self, other): - """Add the quality from another match to that of this match.""" - self._quality += other._quality - return self - - __hash__ = None - """Not implemented, because __eq__ is overridden.""" - - def __eq__(self, other): - """Check whether two matches have the same quality.""" - return (not self and not other) or \ - (self and other and self._quality == other._quality) - - def __lt__(self, other): - """Compare matches by their quality; null matches always lose.""" - return (not self and other) or \ - (self and other and self._quality < other._quality) - - def __bool__(self): - """Test if this is a real match or just the null one.""" - return self._element is not None - - __nonzero__ = __bool__ - - @property - def element(self): - """Element from a valid match, or None for a null match.""" - return self._element - -# Right now this is treated as private to the module. But it's getting big -# enough to justify having its own tests, maybe. -def _element_attribute_match(element, attributes, ignore=[]): - """Check an element to see if it matches the given attributes. - - If an element passes the check, give it a "quality" corresponding to - the number of matched attributes. Otherwise, return null match. - - The ignore attribute specifies attributes to ignore. - """ - match_quality = 0 - for key in element.keys(): - if key in ignore: - continue - if key in attributes and attributes[key] == element.get(key): - match_quality += 1 - else: - return ElementMatch() - return ElementMatch(element, match_quality) - -def best_match(xml_tree, path, attributes={}): - """Find the best match for a path with attributes in an XML tree. - - The return value is the matched element. - - Arguments: - xml_tree - A tree from the xml.etree.ElementTree module. - path - The path to search for. - attributes - A dict containing attributes to match. Not all attributes - must be present on an element to get a match, but any - attributes present must match this input, and the "best" - match has the most matches. - - The attributes argument defaults to an empty dict. - """ - - # Recursive function to find a match. - def find_best_below(element, path): - # Done when there's no more of the path to match. - if len(path) == 0: - return ElementMatch(element) - - # Otherwise, get the beginning part of the path. - path_head, slash, path_tail = path.partition("/") - - # Search through subelements that match the next part of the path. - # For each one, get the quality of the match based on the number - # of matching attributes, then call self recursively to get - # best match from subelements. - best_match = ElementMatch() - for trial_element in element.findall(path_head): - local_match = _element_attribute_match(trial_element, - attributes) - if local_match: - new_match = \ - find_best_below(trial_element, path_tail) - new_match += local_match - if best_match < new_match: - best_match = new_match - - return best_match - - # Do the search. - element = xml_tree.getroot() - match = find_best_below(element, path) - - return match.element - -def all_matches(xml_tree, path, attributes={}, ignore=[]): - """Find all matches for a path with attributes in an XML tree. - - This is a generator. Each of the returned values will be a matching - element. - - Arguments: - xml_tree - A tree from the xml.etree.ElementTree module. - path - The path to search for. - attributes - A dict containing attributes to match. 
Not all attributes - must be present on an element to get a match, but any - attributes present must match this input. - ignore - A list of attributes to ignore when matching. Attributes in - this list are ignored regardless of whether or not they are in - the attributes dict. - - The attributes argument defaults to an empty dict. - """ - - # Recursive function to find all matches. - def find_matches_below(element, path): - # Done when there's no more of the path to match. - if len(path) == 0: - yield ElementMatch(element) - return - - # Otherwise, get the beginning part of the path. - path_head, slash, path_tail = path.partition("/") - - # Search through subelements that match the next part of the path. - # For each one, call self recursively to get all subelement matches. - for trial_element in element.findall(path_head): - if _element_attribute_match(trial_element, attributes, ignore): - for match in find_matches_below(trial_element, path_tail): - yield match - - element = xml_tree.getroot() - for match in find_matches_below(element, path): - yield match.element - -def elements_to_dict(elements, key_attr="key"): - """Uses a key attribute to produce a dict from ElementTree elements. - - For each element, it creates an entry in the returned dict. The key is - determined by key_attr, and the value is the text of the element. - - Arguments: - elements - An iterable of elements to convert. - key_attr - The name of an attribute. Each element must have this - attribute to be included in the dict, and the value of this - attribute will be used as the key. - """ - - return dict( - (elem.get(key_attr), elem.text) - for elem in elements - if key_attr in elem.keys() - ) diff --git a/scripts/fortran_unit_testing/run_tests.py b/scripts/fortran_unit_testing/run_tests.py index f3b0c1deeba0..715a98f7fdec 100755 --- a/scripts/fortran_unit_testing/run_tests.py +++ b/scripts/fortran_unit_testing/run_tests.py @@ -20,7 +20,6 @@ #================================================= from printer import Printer from shutil import rmtree -from distutils.spawn import find_executable # This violates CIME policy - move to CIME/XML directory from xml.etree.ElementTree import ElementTree From 9e8584d4ca2515863e3b72df09943b7b61eb37a6 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 26 Apr 2017 15:53:14 -0600 Subject: [PATCH 147/219] remove debug print statement, fix netcdf4p support --- src/externals/pio1/pio/CMakeLists.txt | 4 +--- src/externals/pio1/pio/pionfput_mod.F90.in | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/externals/pio1/pio/CMakeLists.txt b/src/externals/pio1/pio/CMakeLists.txt index f871ca245a40..d2c162bb71d1 100644 --- a/src/externals/pio1/pio/CMakeLists.txt +++ b/src/externals/pio1/pio/CMakeLists.txt @@ -30,7 +30,7 @@ endif() # Netcdf is required #SET (NETCDF_FIND_COMPONENTS F90) -FIND_PACKAGE(NetCDF "4.3.3" COMPONENTS Fortran) +FIND_PACKAGE(NetCDF "4.3.3" COMPONENTS C Fortran) IF (${NetCDF_Fortran_FOUND}) MESSAGE("Building PIO with netcdf support ") SET(pio_include_dirs_ ${pio_include_dirs_} ${NetCDF_Fortran_INCLUDE_DIR}) @@ -189,5 +189,3 @@ endif() if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../testpio) ADD_SUBDIRECTORY(../testpio testpio) endif() - - diff --git a/src/externals/pio1/pio/pionfput_mod.F90.in b/src/externals/pio1/pio/pionfput_mod.F90.in index 1ce9353ba2aa..02e87db6000e 100644 --- a/src/externals/pio1/pio/pionfput_mod.F90.in +++ b/src/externals/pio1/pio/pionfput_mod.F90.in @@ -125,7 +125,6 @@ contains ierr = nfmpi_begin_indep_data(File%fh) if(Ios%io_rank==0 .and. 
(ierr==NF_EINDEP .or. ierr==PIO_NOERR)) then - print *,__PIO_FILE__,__LINE__,index,count,trim(ival) ierr = nfmpi_put_vara (File%fh, varid, int(index,kind=PIO_OFFSET), & int(count,kind=PIO_OFFSET), ival, int(count,kind=PIO_OFFSET), & MPI_CHARACTER) From 3894f114cff4d51021c50c30312489eae4eb776e Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Wed, 26 Apr 2017 15:57:49 -0600 Subject: [PATCH 148/219] Update unit testing README file --- scripts/fortran_unit_testing/README | 64 +++++++++-------------------- 1 file changed, 20 insertions(+), 44 deletions(-) diff --git a/scripts/fortran_unit_testing/README b/scripts/fortran_unit_testing/README index 4b1435ff9043..8bd194ce57d6 100644 --- a/scripts/fortran_unit_testing/README +++ b/scripts/fortran_unit_testing/README @@ -1,20 +1,10 @@ -*- mode:org -*- #+startup: showall -Quick guide to the CESM unit testing framework +Quick guide to the CIME unit testing framework * Requirements - - Machines :: Basic compiler information is retrieved from - config_compilers.xml in Machines. Even if running on an - unsupported machine, it is necessary to have this file to - get some basic information about the compiler. - - As of this writing, it is possible to run with Machines, - this directory, the CMake module directory, and the - directory that specifies tests. A full CESM checkout is - more convenient but not needed. - - Python 2.6 or later :: Required for scripts. Python 3.2 is also known to work. @@ -40,7 +30,7 @@ Quick guide to the CESM unit testing framework ** Quick start In the simplest case, you may want to run tests from a single directory. - In that case, there are just three pieces of information that you usually + In that case, there are just two pieces of information that you usually need to specify: 1. The location of the directory where you want to build and run the @@ -48,23 +38,11 @@ Quick guide to the CESM unit testing framework 2. The location of the directory specifying the test via a CMakeLists.txt file ("TEST_SPEC_DIR"). - - 3. The compiler vendor ("COMPILER"). + (Optional: Defaults to the current directory) The following command can run the tests: - run_tests.py --build-dir=BUILD_DIR --test-spec-dir=TEST_SPEC_DIR \ - --compiler=COMPILER - - The current compiler options are: - - - GNU - - IBM - - Intel - - NAG - - PGI - - COMPILER defaults to GNU. + run_tests.py --build-dir=BUILD_DIR --test-spec-dir=TEST_SPEC_DIR If you change a source file, you can run the same command again, and run_tests.py will do the bare minimum of work necessary to re-run the @@ -79,18 +57,9 @@ Quick guide to the CESM unit testing framework Some particularly useful options are as follows: - + --build-type :: Setting this option to "CESM" will build the tests - with CESM compiler options (or the most similar - pFUnit-compatible flags). Setting this option to - "CESM_DEBUG" will build with CESM's DEBUG mode - options. - + --clean :: Cleans the directory and re-runs both CMake and make when running the tests. - + -R :: This option allows you to specify a regular expression; only - tests with names matching that expression will run. - + -v :: Verbose output, including the commands used to compile, any compiler warnings, and CTest output from tests that pass. To get CTest output from tests that fail, set the environment variable @@ -103,34 +72,41 @@ Quick guide to the CESM unit testing framework * Defining new tests using CMake - This README focuses on integration between the python scripts here, the - CMake modules, and CESM Machines. 
Further information, such as detailed - APIs, can be found in the documentation for pFUnit and the CMake modules - themselves. + This README focuses on integration between the python scripts here, + the CMake modules, and machine information. Further information, such + as detailed APIs, can be found in the documentation for pFUnit and the + CMake modules themselves. If you are new to this system, it is *highly* recommended that you look through the examples first. -** CIME_utils - run_tests.py interface +** CIME_initial_setup and CIME_utils - run_tests.py interface - The following CMake snippet will include the CESM utilities module. The + The following CMake snippet will include the CIME utilities module. The variable ${CIME_CMAKE_MODULE_DIRECTORY} is defined by run_tests.py, or by hand if you choose not to use run_tests.py and instead invoke cmake directly. list(APPEND CMAKE_MODULE_PATH ${CIME_CMAKE_MODULE_DIRECTORY}) + include(CIME_initial_setup) + project(cime_tests Fortran C) include(CIME_utils) + The project name does not need to be 'cime_tests'. The key point of + the above snippet is that you need to include CIME_initial_setup + before the project line, and CIME_utils after the project line. + CIME_utils processes a few options set by run_tests.py (e.g. "USE_COLOR"), and includes all of the following modules as well. Projects that do development without run_tests.py may choose to include only the modules below that they need. -** Compilers - CESM compiler options +** Compilers - CIME compiler options This module is also part of the run_tests.py interface; the primary - purpose is to read in flags generated from CESM's config_compilers.xml - file. However, it's also a catch-all for compiler-specific information. + purpose is to read in flags generated from the config_compilers.xml + file. However, it's also a catch-all for compiler-specific + information. The details of this module shouldn't be important to most users, most of the time, but it does provide one utility function that you may want to From 540a9b05e0ae8049bc5024bcc29d19b17ce09767 Mon Sep 17 00:00:00 2001 From: Robert Jacob Date: Thu, 27 Apr 2017 11:29:45 -0500 Subject: [PATCH 149/219] Remove top level README This was just a list of svn dirs CIME started from. Moved to the github wiki. 
--- README | 31 ------------------------------- 1 file changed, 31 deletions(-) delete mode 100644 README diff --git a/README b/README deleted file mode 100644 index ec161702495d..000000000000 --- a/README +++ /dev/null @@ -1,31 +0,0 @@ -The cime directory structure (other than the externals/ directory) -was generated from the following trunk tags: - -cime/driver_cpl https://svn-ccsm-models.cgd.ucar.edu/drv/seq_mct/trunk_tags/drvseq5_1_15 - -cime/components/data_comps/datm https://svn-ccsm-models.cgd.ucar.edu/datm7/trunk_tags/datm8_150310 -cime/components/data_comps/dice https://svn-ccsm-models.cgd.ucar.edu/dice7/trunk_tags/dice8_150310 -cime/components/data_comps/dlnd https://svn-ccsm-models.cgd.ucar.edu/dlnd7/trunk_tags/dlnd8_150310 -cime/components/data_comps/docn https://svn-ccsm-models.cgd.ucar.edu/docn7/trunk_tags/docn8_150310 -cime/components/data_comps/drof https://svn-ccsm-models.cgd.ucar.edu/drof/trunk_tags/drof_150310 -cime/components/stub_comps https://svn-ccsm-models.cgd.ucar.edu/stubs/trunk_tags/stubs1_4_08 -cime/components/xcpl_comps https://svn-ccsm-models.cgd.ucar.edu/dead7/trunk_tags/dead7_8_04 - -cime/machines https://svn-ccsm-models.cgd.ucar.edu/Machines/trunk_tags/Machines_150309 - -cime/scripts https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts_150309 - -cime/share/csm_share https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_150116 -cime/share/esmf_wrf_timemgr https://svn-ccsm-models.cgd.ucar.edu/esmf_wrf_timemgr/trunk_tags/esmf_wrf_timemgr_141217 -cime/share/timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_150302 - -cime/utils/pythonlib https://svn-ccsm-models.cgd.ucar.edu/scripts/trunk_tags/scripts4_150204/scripts/ccsm_utils/Tools/pythonlib -cime/utils/perl5lib https://svn-ccsm-models.cgd.ucar.edu/perl5lib/trunk_tags/perl5lib_150302 - -cime/tools/load_balancing_tool https://svn-ccsm-models.cgd.ucar.edu/tools/load_balancing_tool/trunk_tags/load_balancing_tool_141008 -cime/tools/unit_testing https://svn-ccsm-models.cgd.ucar.edu/unit_testing/trunk_tags/unit_testing_0_16 -cime/tools/statistical_ensemble_test https://svn-ccsm-models.cgd.ucar.edu/validation_testing/trunk_tags/validation_20140708/run_CESM/ -cime/tools/mapping https://svn-ccsm-models.cgd.ucar.edu/tools/mapping/trunk_tags/mapping_141106 -cime/tools/cprnc https://svn-ccsm-models.cgd.ucar.edu/tools/cprnc/trunk_tags/cprnc_150301 - - From 94286731223b7a094a7f2647f415b8a621098c78 Mon Sep 17 00:00:00 2001 From: Robert Jacob Date: Thu, 27 Apr 2017 11:45:12 -0500 Subject: [PATCH 150/219] Add links names to README.md Add links for ACME and documentation. Add Michael, Erich and Bill as developers. Move documenation link up. --- README.md | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 73e19ea18d85..05a643e2f477 100644 --- a/README.md +++ b/README.md @@ -3,11 +3,16 @@ Common Infrastructure for Modeling the Earth **cime** (pronounced: seem) represents the infrastructure code for the Community Earth System Model - (CESM) and the Accelerated Climate Model for Energy (ACME). -*cime* includes scripts for configuration, build, and testing of -models, as well as data and stub models for climate components, -and a code for coupling different climate components together. - + (CESM) and the +Accelerated Climate Model for Energy (ACME). 
+*cime* provides scripts for configuration, build, and testing of +models, as well as code for data and stub models for climate components, +and a driver code for bringing all the climate components together in a single executable. + +# Documentation + +See esmci.github.io/cime + # Developers *cime* was initially extracted from CESM as a stand-alone capability in 2015 @@ -28,18 +33,12 @@ Mariana Vertenstein | NCAR | 1 - D | NSF, DOE Jim Edwards | NCAR | 1 - D | NSF (1-D), DOE(1-2) Jim Foucar | SNL | 5 - D | DOE Rob Jacob | ANL | 5 - D | DOE +Bill Sacks | NCAR | 1 - D | NSF Andreas Wilke | ANL | 5 - D | DOE Jason Sarich | ANL | 5 - D | DOE -??Sean Santos | NCAR | 1 - 4 | NSF +Michael Deakin | SNL | 5 - D | DOE +Erich Foster | SNL | 5 - D | DOE +Sean Santos | NCAR | 1 - 4 | NSF _Key: D = Current development version (i.e. still active on project)_ -# Documentation - -Currently, this is a work in progress. - - - - - - From 2826d9c4fc20cedcf04adac1b7a4834e0af1e7f9 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Thu, 27 Apr 2017 11:15:29 -0600 Subject: [PATCH 151/219] Removed Tabs from create_newcase There were tabs in create_newcase. None prevented the script from running, but they should be removed for consistency. Test suite: scripts_regression_tests.py --fast Test baseline: Test namelist changes: Test status: bit for bit Fixes #1434 User interface changes?: No Code review: @jgfouca --- scripts/create_newcase | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/create_newcase b/scripts/create_newcase index 1e094c3a7a22..99a205f1795f 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -28,7 +28,7 @@ OR parser.add_argument("--case", "-case", required=True, help="(required) Specify the case name. " "If not a full pathname, then the case is created " - "under then current working directory ") + "under then current working directory ") parser.add_argument("--compset", "-compset", required=True, help="(required) Specify a compset. " @@ -76,7 +76,7 @@ OR parser.add_argument("--pesfile", help="Only used and required for --user-compset argument." "Full pathname of the pes specification file" - "This argument is required if --user-compset is True") + "This argument is required if --user-compset is True") parser.add_argument("--user-grid", action="store_true", help="If set, then the -grid argument is treated as a user specified grid." @@ -88,7 +88,7 @@ OR help="Full pathname of config grid file to use" "This should be a copy of config/config_grids.xml" "with the new user grid changes added to it" - "This argument is required if --user-grid is True") + "This argument is required if --user-grid is True") parser.add_argument("--srcroot", default=os.path.dirname(cimeroot), help="Alternative path for source root directory.
By default this is set to" From 6e5036a9016aa432ce3c1b5312b291a07fb6ab12 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Thu, 27 Apr 2017 11:19:29 -0600 Subject: [PATCH 152/219] Fixed Some Tabulation Fixed some misaligned tabulation in create_newcase Test suite: scripts_regression_tests.py --fast Test baseline: Test namelist changes: Test status: bit for bit Fixes #1434 User interface changes?: No Code review: @jgfouca --- scripts/create_newcase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/create_newcase b/scripts/create_newcase index 99a205f1795f..1524e76d958a 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -76,7 +76,7 @@ OR parser.add_argument("--pesfile", help="Only used and required for --user-compset argument." "Full pathname of the pes specification file" - "This argument is required if --user-compset is True") + "This argument is required if --user-compset is True") parser.add_argument("--user-grid", action="store_true", help="If set, then the -grid argument is treated as a user specified grid." From 633e73a8b43f19b704fa4fafc9b21a7340970883 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Thu, 27 Apr 2017 12:24:02 -0600 Subject: [PATCH 153/219] Add --component Option to preview_namelists Added an option to preview_namelists, which allows the user to specify the component for which to run the preview_namelists script. Test suite: scripts_regression_tests.py --fast Test baseline: Test namelist changes: Test status: bit for bit Fixes #887 User interface changes?: added --component option to preview_namelists Code review: @mnlevy1981 --- scripts/Tools/preview_namelists | 38 ++++++----------- scripts/lib/CIME/preview_namelists.py | 61 ++++++++++++++------------- 2 files changed, 44 insertions(+), 55 deletions(-) diff --git a/scripts/Tools/preview_namelists b/scripts/Tools/preview_namelists index a87a52191909..e304e9155684 100755 --- a/scripts/Tools/preview_namelists +++ b/scripts/Tools/preview_namelists @@ -15,50 +15,38 @@ from CIME.utils import expect import argparse, doctest ############################################################################### -def parse_command_line(args, description): +def parse_command_line(args): ############################################################################### - parser = argparse.ArgumentParser( - usage="""\n%s [--debug] -OR -%s --verbose -OR -%s --help -OR -%s --test - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run \033[0m - > %s -""" % ((os.path.basename(args[0]), ) * 5), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - + cime_model = CIME.utils.get_model() + parser = argparse.ArgumentParser() CIME.utils.setup_standard_logging_options(parser) parser.add_argument("caseroot", nargs="?", default=os.getcwd(), help="Case directory to build") + parser.add_argument('--component', + help="Specify component's namelist to build.") + parser.add_argument('--test', action='store_true', + help="Run preview_namelist in test mode.") - args = parser.parse_args(args[1:]) + args = parser.parse_args() CIME.utils.handle_standard_logging_options(args) - return args.caseroot + return args ############################################################################### def _main_func(description): ############################################################################### - if ("--test" in sys.argv): + args = parse_command_line(sys.argv) + if args.test: test_results = doctest.testmod(verbose=True) sys.exit(1 if test_results.failed > 0 else 0) - caseroot =
parse_command_line(sys.argv, description) + caseroot = args.caseroot expect(os.path.isfile(os.path.join(caseroot, "CaseStatus")), "case.setup must be run prior to running preview_namelists") with Case(caseroot, read_only=False) as case: - create_namelists(case) + create_namelists(case, component=args.component) if (__name__ == "__main__"): _main_func(__doc__) diff --git a/scripts/lib/CIME/preview_namelists.py b/scripts/lib/CIME/preview_namelists.py index 3f1429809af3..ce98d2447ad7 100644 --- a/scripts/lib/CIME/preview_namelists.py +++ b/scripts/lib/CIME/preview_namelists.py @@ -40,7 +40,7 @@ def create_dirs(case): with open(os.path.join(dir_,"CASEROOT"),"w+") as fd: fd.write(caseroot+"\n") -def create_namelists(case): +def create_namelists(case, component=None): """ Create component namelists """ @@ -73,36 +73,37 @@ def create_namelists(case): else: compname = case.get_value("COMP_%s" % model_str.upper()) - cmd = os.path.join(config_dir, "buildnml") - do_run_cmd = False - # This code will try to import and run each buildnml as a subroutine - # if that fails it will run it as a program in a seperate shell - try: - with open(cmd, 'r') as f: - first_line = f.readline() - if "python" in first_line: - mod = imp.load_source("buildnml", cmd) - logger.info(" Calling %s buildnml"%compname) - mod.buildnml(case, caseroot, compname) - else: - raise SyntaxError - except SyntaxError as detail: - if 'python' in first_line: - expect(False, detail) - else: + if component is None or component == model_str: + cmd = os.path.join(config_dir, "buildnml") + do_run_cmd = False + # This code will try to import and run each buildnml as a subroutine + # if that fails it will run it as a program in a seperate shell + try: + with open(cmd, 'r') as f: + first_line = f.readline() + if "python" in first_line: + mod = imp.load_source("buildnml", cmd) + logger.info(" Calling %s buildnml"%compname) + mod.buildnml(case, caseroot, compname) + else: + raise SyntaxError + except SyntaxError as detail: + if 'python' in first_line: + expect(False, detail) + else: + do_run_cmd = True + except AttributeError: do_run_cmd = True - except AttributeError: - do_run_cmd = True - except: - raise - - if do_run_cmd: - logger.info(" Running %s buildnml"%compname) - case.flush() - output = run_cmd_no_fail("%s %s" % (cmd, caseroot), verbose=False) - logger.info(output) - # refresh case xml object from file - case.read_xml() + except: + raise + + if do_run_cmd: + logger.info(" Running %s buildnml"%compname) + case.flush() + output = run_cmd_no_fail("%s %s" % (cmd, caseroot), verbose=False) + logger.info(output) + # refresh case xml object from file + case.read_xml() logger.info("Finished creating component namelists") From 5c479821ac7db9b302ecfb8fa17dd2351c4bf19d Mon Sep 17 00:00:00 2001 From: Francis Vitt Date: Thu, 27 Apr 2017 13:10:56 -0600 Subject: [PATCH 154/219] Use system installed esmf libs on cheyenne modified: config/cesm/machines/config_compilers.xml modified: config/cesm/machines/config_machines.xml --- config/cesm/machines/config_compilers.xml | 2 -- config/cesm/machines/config_machines.xml | 4 ---- 2 files changed, 6 deletions(-) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index 02d5eda32278..3a5a0d0471b8 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -995,8 +995,6 @@ using a fortran linker. 
$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP FALSE - /glade/u/home/fvitt/esmf_7_0_0_intel17/esmf/lib/libO/Linux.intel.64.mpi.default - /glade/u/home/fvitt/esmf_7_0_0_intel17/esmf/lib/libg/Linux.intel.64.mpi.default diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index 6350eaf8da4b..d8992f137175 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -250,9 +250,6 @@ intel/17.0.1 mkl - - gnu/6.3.0 From 845bf15b18f8c948cceb3f6270e2f9df59fa1157 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 27 Apr 2017 13:44:12 -0600 Subject: [PATCH 155/219] Changes based on review --- scripts/Tools/preview_run | 12 +++++++++--- scripts/lib/CIME/XML/env_batch.py | 4 ++-- scripts/lib/CIME/case.py | 2 +- src/drivers/mct/cime_config/config_component.xml | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/scripts/Tools/preview_run b/scripts/Tools/preview_run index 1cc94320e20a..eca424d052a0 100755 --- a/scripts/Tools/preview_run +++ b/scripts/Tools/preview_run @@ -4,10 +4,16 @@ Script to query key CIME shell commands (mpirun and batch submission). To force a certain mpirun command, use: -./xmlchange MPI_RUN_COMMAND $your_cmd +./xmlchange MPI_RUN_COMMAND=$your_cmd + +Example: +./xmlchange MPI_RUN_COMMAND='mpiexec -np 16 --some-flag' To force a certain qsub command, use: -./xmlchange --subgroup=case.run BATCH_COMMAND $your_cmd +./xmlchange --subgroup=case.run BATCH_COMMAND_FLAGS=$your_flags + +Example: +./xmlchange --subgroup=case.run BATCH_COMMAND_FLAGS='--some-flag --other-flag' """ from standard_script_setup import * @@ -29,7 +35,7 @@ OR > %s """ % ((os.path.basename(args[0]), ) * 4), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument("caseroot", nargs="?", default=os.getcwd(), diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index 330e2382a222..724c48143f92 100644 --- a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -371,7 +371,7 @@ def _submit_single_job(self, case, job, depid=None, no_batch=False, batch_args=N return submitargs = self.get_submit_args(case, job) - args_override = self.get_value("BATCH_COMMAND", subgroup=job) + args_override = self.get_value("BATCH_COMMAND_FLAGS", subgroup=job) if args_override: submitargs = args_override @@ -457,7 +457,7 @@ def get_all_queues(self): def get_nodes(self, nodename, attributes=None, root=None, xpath=None): if nodename in ("JOB_WALLCLOCK_TIME", "PROJECT", "PROJECT_REQUIRED", - "JOB_QUEUE", "BATCH_COMMAND"): + "JOB_QUEUE", "BATCH_COMMAND_FLAGS"): nodes = EnvBase.get_nodes(self, "entry", attributes={"id":nodename}, root=root, xpath=xpath) else: diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 9b8000a6a1dd..a6afa77e214f 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -844,6 +844,7 @@ def _create_caseroot_tools(self): os.path.join(toolsdir, "case.submit"), os.path.join(toolsdir, "case.cmpgen_namelists"), os.path.join(toolsdir, "preview_namelists"), + os.path.join(toolsdir, "preview_run"), os.path.join(toolsdir, "check_input_data"), os.path.join(toolsdir, "check_case"), os.path.join(toolsdir, "archive_metadata.sh"), @@ -862,7 +863,6 @@ def _create_caseroot_tools(self): os.path.join(toolsdir, "lt_archive.sh"), os.path.join(toolsdir, "getTiming"), os.path.join(toolsdir, "save_provenance"), - os.path.join(toolsdir, 
"preview_run"), os.path.join(machines_dir,"Makefile"), os.path.join(machines_dir,"mkSrcfiles"), os.path.join(machines_dir,"mkDepends")] diff --git a/src/drivers/mct/cime_config/config_component.xml b/src/drivers/mct/cime_config/config_component.xml index 186fe6e85a17..48cf2c38d3de 100644 --- a/src/drivers/mct/cime_config/config_component.xml +++ b/src/drivers/mct/cime_config/config_component.xml @@ -2602,7 +2602,7 @@ The machine wallclock setting. Default determined in config_machines.xml can be overwritten by testing - + char From dfa21c30b3814dbb0509dd3fed36189b166a383a Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 27 Apr 2017 13:49:48 -0600 Subject: [PATCH 156/219] fix issue with setting comp_classes --- scripts/Tools/xmlchange | 5 ++--- scripts/lib/CIME/case.py | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange index 69a97840949e..a8686fac4a0c 100755 --- a/scripts/Tools/xmlchange +++ b/scripts/Tools/xmlchange @@ -93,11 +93,10 @@ def xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, force, dryrun): with Case(caseroot, read_only=False) as case: + comp_classes = case.get_values("COMP_CLASSES") if xmlfile: case.set_file(xmlfile) - - env_mach_pes = case.get_env("mach_pes") - env_mach_pes.set_components(case.get_values("COMP_CLASSES")) + case.set_comp_classes(comp_classes) if len(listofsettings): logger.debug("List of attributes to change: %s" , listofsettings) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 96c23fa42698..68a52ccc1682 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -492,7 +492,7 @@ def __iter__(self): else: yield key, val - def _set_comp_classes(self, comp_classes): + def set_comp_classes(self, comp_classes): self._component_classes = comp_classes for env_file in self._env_entryid_files: env_file.set_components(comp_classes) @@ -521,7 +521,7 @@ def _get_component_config_data(self, files): # loop over all elements of both component_classes and components - and get config_component_file for # for each component - self._set_comp_classes(drv_comp.get_valid_model_components()) + self.set_comp_classes(drv_comp.get_valid_model_components()) if len(self._component_classes) > len(self._components): self._components.append('sesp') From 78cf98cdcd864afcb066abca15246ad6682ab90f Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 27 Apr 2017 13:55:04 -0600 Subject: [PATCH 157/219] Fix merge problem --- scripts/lib/CIME/case.py | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index c94e2eb1d51e..667801cbb30d 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -1117,6 +1117,16 @@ def get_mpirun_cmd(self, job="case.run"): if mpirun_cmd_override not in ["", None, "UNSET"]: return mpirun_cmd_override + " " + run_exe + " " + run_misc_suffix + # Things that will have to be matched against mpirun element attributes + mpi_attribs = { + "compiler" : self.get_value("COMPILER"), + "mpilib" : self.get_value("MPILIB"), + "threaded" : self.get_build_threaded(), + "unit_testing" : False + } + + executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) + # special case for aprun if executable is not None and "aprun" in executable: aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe) @@ -1124,28 +1134,12 @@ def get_mpirun_cmd(self, job="case.run"): return executable + aprun_args + " " + 
run_misc_suffix else: - # Things that will have to be matched against mpirun element attributes - mpi_attribs = { - "compiler" : self.get_value("COMPILER"), - "mpilib" : self.get_value("MPILIB"), - "threaded" : self.get_build_threaded(), - "unit_testing" : False - } - - executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) - - # special case for aprun - if executable == "aprun": - aprun_cmd, num_nodes = get_aprun_cmd_for_case(self, run_exe) - expect(num_nodes == self.num_nodes, "Not using optimized num nodes") - return aprun_cmd + " " + run_misc_suffix - else: - mpi_arg_string = " ".join(args.values()) + mpi_arg_string = " ".join(args.values()) - if self.get_value("BATCH_SYSTEM") == "cobalt": - mpi_arg_string += " : " + if self.get_value("BATCH_SYSTEM") == "cobalt": + mpi_arg_string += " : " - return "%s %s %s" % (executable if executable is not None else "", mpi_arg_string, run_suffix) + return "%s %s %s" % (executable if executable is not None else "", mpi_arg_string, run_suffix) def set_model_version(self, model): version = "unknown" From 99b7c0eeca41e877ed298dcf055d4021be59df56 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 27 Apr 2017 14:13:50 -0600 Subject: [PATCH 158/219] improve error message --- scripts/Tools/xmlquery | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery index 8a1931ffc789..95bb0996fc70 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -174,7 +174,11 @@ def xmlquery(case, variables, subgroup=None, fileonly=False, else: value = get_value_as_string(case, var, resolved=resolved, subgroup=group) - expect(value is not None, " No results found for variable %s"%var) + if value is None: + if xmlfile: + expect(False, " No results found for variable %s in file %s"%(var, xmlfile)) + else: + expect(False, " No results found for variable %s"%var) results[group][var]['value'] = value From 593834cbdaa3b11e68254900a3217e9b8de61efa Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Thu, 27 Apr 2017 14:35:32 -0600 Subject: [PATCH 159/219] Add Some Additional Unit Tests for '+= Added some unit tests in _parse_namelist_group and parse_namelist to ensure '+=' works as expected. Test suite: `scripts_regression_tests.py` and `python -m doctest CIME/namelist.py` Test baseline: Test namelist changes: adds '+=' operator Test status: bit for bit Fixes #839 User interface changes?: Added support for '+=' to namelist parser. Code review: @billsacks @jgfouca --- scripts/lib/CIME/namelist.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index bdc0495231b8..1ff121c8867b 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -1538,6 +1538,8 @@ def _parse_variable_name(self, allow_equals=True): Traceback (most recent call last): ... _NamelistParseError: Error in parsing namelist: '' is not a valid variable name + >>> _NamelistParser('foo+= ')._parse_variable_name() + u'foo' """ old_pos = self._pos separators = (' ', '\n', '=', '+') if allow_equals else (' ', '\n') @@ -2155,6 +2157,8 @@ def parse_namelist(self): OrderedDict([(u'foo', [u"'bar'"])]) >>> _NamelistParser("foo='bar', 'bazz'\n foo+='ban'", groupless=True).parse_namelist() OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u"'ban'"])]) + >>> _NamelistParser("foo+='bar'", groupless=True).parse_namelist() + OrderedDict([(u'foo', [u"'bar'"])]) """ # Return empty dictionary for empty files. 
if self._len == 0: From dcc82302424e5cbe5c8d262e14eadb56dc4def6a Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Thu, 27 Apr 2017 15:08:38 -0600 Subject: [PATCH 160/219] Added Description Back into Argparser Realized I had removed description from argparser and added it back in since there is a description in the script. Test suite: scripts_regression_tests.py --fast Test baseline: Test namelist changes: Test status: bit for bit Fixes #887 User interface changes?: Added --component arguemtn to preview_namelist Code review: @jgfouca --- scripts/Tools/preview_namelists | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/scripts/Tools/preview_namelists b/scripts/Tools/preview_namelists index e304e9155684..da9d33b43c0b 100755 --- a/scripts/Tools/preview_namelists +++ b/scripts/Tools/preview_namelists @@ -15,10 +15,10 @@ from CIME.utils import expect import argparse, doctest ############################################################################### -def parse_command_line(args): +def parse_command_line(args, description): ############################################################################### cime_model = CIME.utils.get_model() - parser = argparse.ArgumentParser() + parser = argparse.ArgumentParser(description=description) CIME.utils.setup_standard_logging_options(parser) parser.add_argument("caseroot", nargs="?", default=os.getcwd(), @@ -37,15 +37,14 @@ def parse_command_line(args): ############################################################################### def _main_func(description): ############################################################################### - args = parse_command_line(sys.argv) + args = parse_command_line(sys.argv, description) if args.test: test_results = doctest.testmod(verbose=True) sys.exit(1 if test_results.failed > 0 else 0) - caseroot = args.caseroot - expect(os.path.isfile(os.path.join(caseroot, "CaseStatus")), + expect(os.path.isfile(os.path.join(args.caseroot, "CaseStatus")), "case.setup must be run prior to running preview_namelists") - with Case(caseroot, read_only=False) as case: + with Case(args.caseroot, read_only=False) as case: create_namelists(case, component=args.component) if (__name__ == "__main__"): From 7c5fe9d48764da981300e7d721870b4c65c55d70 Mon Sep 17 00:00:00 2001 From: Michael Deakin Date: Thu, 27 Apr 2017 23:23:44 -0600 Subject: [PATCH 161/219] Untested implementation of specifying the directory to put the cime scripts for a case --- scripts/create_newcase | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/scripts/create_newcase b/scripts/create_newcase index 1524e76d958a..08e3b04d8617 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -97,6 +97,9 @@ OR parser.add_argument("--output-root", help="Alternative path for the directory where case output is written") + parser.add_argument("--script-root", dest="script_root", default=None, + help="Alternative path for the directory where the cime scripts are written") + if model == "cesm": parser.add_argument("--run-unsupported", action="store_true", help="Force the creation of a case not tested or supported by CESM developers") @@ -160,20 +163,25 @@ OR args.mpilib, args.project, args.pecount, \ args.user_mods_dir, args.user_compset, args.pesfile, \ args.user_grid, args.gridfile, args.srcroot, args.test, args.ninst, \ - args.walltime, args.queue, args.output_root, run_unsupported, args.answer, args.input_dir + args.walltime, args.queue, args.output_root, args.script_root, \ + run_unsupported, 
args.answer, args.input_dir ############################################################################### def _main_func(description): ############################################################################### cimeroot = os.path.abspath(CIME.utils.get_cime_root()) - caseroot, compset, grid, machine, compiler, \ + casename, compset, grid, machine, compiler, \ mpilib, project, pecount, \ user_mods_dir, user_compset, pesfile, \ user_grid, gridfile, srcroot, test, ninst, walltime, queue, \ - output_root, run_unsupported, answer, input_dir = parse_command_line(sys.argv, cimeroot, description) + output_root, script_root, run_unsupported, \ + answer, input_dir = parse_command_line(sys.argv, cimeroot, description) - caseroot = os.path.abspath(caseroot) + if script_root is None: + caseroot = os.path.abspath(casename) + else: + caseroot = os.path.abspath(script_root) # create_test creates the caseroot before calling create_newcase # otherwise throw an error if this directory exists @@ -182,7 +190,7 @@ def _main_func(description): with Case(caseroot, read_only=False) as case: # Set values for env_case.xml - case.set_lookup_value("CASE", os.path.basename(caseroot)) + case.set_lookup_value("CASE", os.path.basename(casename)) case.set_lookup_value("CASEROOT", caseroot) case.set_lookup_value("SRCROOT", srcroot) From 5df7269b445a08af5bac062dba36453e639f5f9b Mon Sep 17 00:00:00 2001 From: Michael Deakin Date: Thu, 27 Apr 2017 23:52:00 -0600 Subject: [PATCH 162/219] Update TestCreateNewcase to test setting script-root differently than case --- scripts/tests/scripts_regression_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 7dac780e46d2..150044905cea 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -253,7 +253,7 @@ def test_a_createnewcase(self): shutil.rmtree(testdir) cls._testdirs.append(testdir) - run_cmd_assert_result(self, "%s/create_newcase --case %s --compset X --res f19_g16 --output-root %s" % (SCRIPT_DIR, testdir, cls._testroot), from_dir=SCRIPT_DIR) + run_cmd_assert_result(self, "%s/create_newcase --case CreateNewcaseTest --script-root %s --compset X --res f19_g16 --output-root %s" % (SCRIPT_DIR, testdir, cls._testroot), from_dir=SCRIPT_DIR) run_cmd_assert_result(self, "./case.setup", from_dir=testdir) run_cmd_assert_result(self, "./case.build", from_dir=testdir) From ffe5ae083959f77491d35e57295d451121dad3c4 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Fri, 28 Apr 2017 10:22:53 -0600 Subject: [PATCH 163/219] Added Unit Test for '+=' to _parse_namelist_group There was no unit test to check '+=' in parse_namelist_group, so I have added one. 
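For context, the behavior these tests pin down is append-on-repeat. A minimal user_nl-style sketch (the name 'foo' is purely illustrative, mirroring the doctests added below):

    foo='bar'
    foo+='baz'

is parsed the same as foo='bar', 'baz': the '+=' form appends to any values already accumulated for that variable instead of replacing them, and a bare foo+='bar' with no prior setting is treated as an ordinary assignment.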
Test suite: python -m doctest CIME/namelist.py Test baseline: Test namelist changes: '+=' support Test status: bit for bit Fixes #839 User interface changes?: Added support for '+=' to user_nl_* Code review: @billsack --- scripts/lib/CIME/namelist.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index 1ff121c8867b..795fce7187a4 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -2090,6 +2090,10 @@ def _parse_namelist_group(self): >>> x._parse_namelist_group() >>> x._settings OrderedDict([(u'foo', [u"'bar'"])]) + >>> x = _NamelistParser("&group foo='bar', foo+='baz' /", groupless=True) + >>> x._parse_namelist_group() + >>> x._settings + OrderedDict([(u'foo', [u"'bar'", u"'baz'"])]) """ group_name = self._parse_namelist_group_name() if not self._groupless: From 3038af72f989d7deb44bb5b8e356be84f13ac6e1 Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Fri, 28 Apr 2017 10:44:29 -0600 Subject: [PATCH 164/219] Update ChangeLog --- ChangeLog | 262 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 262 insertions(+) diff --git a/ChangeLog b/ChangeLog index 67b089d6b5b5..efbc7df40c25 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,267 @@ ====================================================================== +Originator: Chris Fischer +Date: 4-28-2017 +Tag: cime5.3.0-alpha.08 +Answer Changes: [None, Round Off, Climate Changing] +Tests: scripts_regression_tests, SMS_D_Ld3.f45_g37_rx1.A.hobart_nag, + ERS_Ln9.T62_g37.DTEST.yellowstone_intel, CESM prealpha on hobart_nag +Dependencies: cime_config0.0.1.alpha.07, cism2_1_30, clm4_5_15_r235 + +Brief Summary: + - Use ESMF7 on yellowstone and cheyenne. + - Pass annual-average fields to GLC. + - Give users more info and control over key run commands. + - Moves unit testing infrastructure from tools/unit_testing to + scripts/fortran_unit_testing. + - There were tabs in create_newcase. None prevented the script from + running, but should be removed for consistency. + - nag compliler needs an additional library flag when built with debug. + - Update case members in case_setup. + - Handle the case when executable is None. + - Fix big mistake in scripts_regression_tests. + - Minor changes to xmlchange. + - Fix minor bug in test_scheduler. + - Rewrite testreporter into python. + - Print time built per model. + - Add input-dir to create_newcase. + - Get unit test build and run working with serial or parallel pFUnit. + - Add Support of true and false in Namelist. + - Remove the cprnc.out files prior to running tests. + - Fix bug where LockedFiles weren't checked if there was a dot in + filepath. + - Addition of new dwav and dlnd compsets and functionality. + - Errput may be empty string or None. + - Return aprun args but use executable string from xml. + - Add illegal char : to check_name test. + - Simplify netcdf lib args on hobart. + - Bug fix for DART in case_st_archive.py. + - Get Fortran unit tests working on cheyenne. + - Two changes to fix the Fortran unit tests that had gotten broken + recently. + - Improve create_test output by dumping case errors directly to screen. + - Fix NODEFAIL test on cheyenne. + - Fix indentation problem in case_setup. + - Allow user_mods directory in compset definition. + - Fix typo in cori-knl bind command. + - Big cs.status upgrade. + - Force user to always go through case.submit. + - Promote to warning if machine doesn't match probed machine. 
+ - Make Machines.get_value more likely to return values of the correct + type. + - Remove conditional pio1 code. + - Fixes for PRE test. + - Set rearr opts using a pio interface. + - Bring in latest version of PIO1 to CIME. + +User interface changes: + - Moves run_tests.py to scripts/fortran_unit_testing. + - Adds PET test to cime_developer, PET behavior change. + - Output time per built component. + - config_compilers should now give MPILIB and compile_threaded + attributes for PFUNIT. + - The default mode of operation for run_tests.py is now serial. + - Other components that have unit tests (CLM and CAM) will need to + rework their top-level CMakeLists.txt file slightly. + - Do not allow ':' in testid or case name. + - Output changes for create_test. + - Significant to cs.status. + - Minor output changes to create_newcase. + - New PIO1 interface to set rearranger options. + - Signficant: new preview_run tool and new XML variables. + + +PR summary: git log --oneline --first-parent [previous_tag]..master +6d8ccb3 Merge pull request #1403 from fvitt/esmf +fe82bd6 Merge pull request #1413 from billsacks/billsacks/glc_tavg_v2 +dc29942 Merge pull request #1359 from ESMCI/jgfouca/new_query_tools +9c5b1aa Merge pull request #1428 from billsacks/move_unit_tests +c726ded Merge pull request #1435 from ESMCI/erichlf/create_newcase/tabs2spaces +8924bb3 Merge pull request #1421 from jedwards4b/hobart_nag_fix +f59dd4d Merge pull request #1414 from ESMCI/jgfouca/pet_fix_plus_coverage +67b1be7 Merge pull request #1410 from jedwards4b/executable_none_fix +0fe9e3d Merge pull request #1412 from ESMCI/jgfouca/fix_scripts_regr_test_rc +7c28d90 Merge pull request #1405 from ESMCI/jgfouca/xmlchange_append_add_space +68ab80b Merge pull request #1406 from ESMCI/jgfouca/test_sched_bugfix +da9d3b8 Merge pull request #1299 from ESMCI/fischer/testreporter_rewrite_python +f8f0fec Merge pull request #1402 from ESMCI/jgfouca/show_build_time_per_component +dc1a7ce Merge pull request #1401 from ESMCI/jgfouca/add_input_dir_to_create_newcase +962b20b Merge pull request #1396 from billsacks/compiler_from_macros +af32abd Merge pull request #1395 from ESMCI/erichlf/Build/Namelist.pm +9f5d489 Merge pull request #1394 from jedwards4b/system_test_rerun_improvement +1db632a Merge pull request #1391 from ESMCI/sarich/fix_bug_in_check_lockedfiles +5cda778 Merge pull request #1365 from ESMCI/mvertens/data_wave +2742c17 Merge pull request #1390 from ESMCI/jgfouca/fix_bug_in_errput_run_cmd +6dd85dd Merge pull request #1385 from jedwards4b/aprun_fix +4fb16a1 Merge pull request #1383 from jedwards4b/new_illegal_char +40a6017 Merge pull request #1386 from jedwards4b/hobart_netcdf_slib_fix +5a141f8 Merge pull request #1375 from ESMCI/bertinia/st_archive +06c5645 Merge pull request #1381 from billsacks/cheyenne_unit_tests +9629886 Merge pull request #1378 from jedwards4b/funit_test_fix +d79922e Merge pull request #1377 from ESMCI/jgfouca/improve_create_test_output +9d9f09c Merge pull request #1370 from ESMCI/jgfouca/minor_fix_for_cheyenne +a99eab3 Merge pull request #1373 from jedwards4b/pio_adjust_indentation_fix +ca289d9 Merge pull request #1366 from ESMCI/mvertens/usermods_in_compset +aaee38a Merge pull request #1369 from jedwards4b/cori-knlfix +d0e277d Merge pull request #1363 from ESMCI/jgfouca/big_cs_status_upgrade +dd5a30b Merge pull request #1362 from ESMCI/jgfouca/force_resubmit_to_use_submit +8282a8f Merge pull request #1361 from ESMCI/jgfouca/inform_user_of_machine +2530c66 Merge pull request #1360 from 
ESMCI/jgfouca/machine_get_value_type +1335a62 Merge pull request #1357 from ESMCI/jayeshkrishna/rem_cond_pio1_code +9903472 Merge pull request #1350 from gold2718/pausePOP +9f9da67 Merge pull request #1354 from ESMCI/jayeshkrishna/pio_use_set_rearr_opts +be0f9a4 Merge pull request #1352 from ESMCI/jayeshkrishna/bring_latest_pio1 + + +Modified files: git diff --name-status [previous_tag] + +M CMakeLists.txt +D README.unit_testing +M config/acme/machines/config_compilers.xml +M config/acme/machines/userdefined_laptop_template/config_compilers.xml +M config/cesm/config_grids.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/userdefined_laptop_template/config_compilers.xml +M config/config_tests.xml +M config/xml_schemas/config_compsets.xsd +M config/xml_schemas/config_grids_v2.xsd +M config/xml_schemas/config_machines.xsd +A scripts/Tools/cs.status +D scripts/Tools/cs_status +A scripts/Tools/preview_run +A scripts/Tools/testreporter.py +M scripts/Tools/xmlchange +M scripts/create_newcase +M scripts/create_test +A scripts/fortran_unit_testing/Examples/circle_area/src/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/circle_area/src/circle.F90 +A scripts/fortran_unit_testing/Examples/circle_area/tests/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/circle_area/tests/CTest/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/circle_area/tests/CTest/test_driver.F90 +A scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf +A scripts/fortran_unit_testing/Examples/interpolate_1d/src/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/interpolate_1d/src/interpolate_1d.F90 +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CTest/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CTest/test_driver.F90 +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf +A scripts/fortran_unit_testing/Examples/test_list.xml +A scripts/fortran_unit_testing/README +A scripts/fortran_unit_testing/python/.gitignore +A scripts/fortran_unit_testing/python/printer.py +A scripts/fortran_unit_testing/python/test_xml_test_list.py +A scripts/fortran_unit_testing/python/xml_test_list.py +A scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/BuildTools/cmakemacroswriter.py +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/SystemTests/nck.py +M scripts/lib/CIME/SystemTests/nodefail.py +M scripts/lib/CIME/SystemTests/pre.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/compsets.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/machines.py +A scripts/lib/CIME/XML/test_reporter.py +M scripts/lib/CIME/aprun.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case.py +M scripts/lib/CIME/case_run.py +M scripts/lib/CIME/case_setup.py +M scripts/lib/CIME/case_st_archive.py +M scripts/lib/CIME/case_test.py +M scripts/lib/CIME/check_lockedfiles.py +M scripts/lib/CIME/code_checker.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/test_status.py +A scripts/lib/CIME/tests/test_user_mod_support.py +M scripts/lib/CIME/user_mod_support.py +M scripts/lib/CIME/utils.py +M 
scripts/lib/cs.status.template +A scripts/lib/testreporter.template +M scripts/lib/update_acme_tests.py +M scripts/tests/scripts_regression_tests.py +M src/components/data_comps/datm/cime_config/config_component.xml +M src/components/data_comps/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps/datm/datm_comp_mod.F90 +M src/components/data_comps/desp/desp_comp_mod.F90 +M src/components/data_comps/desp/esp_comp_mct.F90 +M src/components/data_comps/dice/cime_config/config_component.xml +M src/components/data_comps/dice/cime_config/namelist_definition_dice.xml +M src/components/data_comps/dice/dice_comp_mod.F90 +M src/components/data_comps/dlnd/cime_config/buildnml +M src/components/data_comps/dlnd/cime_config/config_component.xml +M src/components/data_comps/docn/cime_config/config_component.xml +M src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +M src/components/data_comps/docn/docn_comp_mod.F90 +M src/components/data_comps/drof/cime_config/namelist_definition_drof.xml +M src/components/data_comps/drof/drof_comp_mod.F90 +M src/components/data_comps/dwav/cime_config/buildnml +M src/components/data_comps/dwav/cime_config/config_component.xml +M src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml +M src/components/data_comps/dwav/dwav_comp_mod.F90 +M src/drivers/mct/cime_config/buildexe +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/cime_config/config_component_acme.xml +M src/drivers/mct/cime_config/config_component_cesm.xml +M src/drivers/mct/cime_config/config_compsets.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/main/cesm_comp_mod.F90 +M src/drivers/mct/main/prep_glc_mod.F90 +M src/drivers/mct/main/seq_io_mod.F90 +M src/drivers/mct/main/seq_rest_mod.F90 +M src/drivers/mct/shr/seq_comm_mct.F90 +M src/drivers/mct/shr/seq_infodata_mod.F90 +M src/drivers/mct/shr/seq_timemgr_mod.F90 +M src/drivers/mct/unit_test/CMakeLists.txt +M src/drivers/mct/unit_test/stubs/CMakeLists.txt +A src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 +A src/externals/CMake/CIME_initial_setup.cmake +M src/externals/CMake/CIME_utils.cmake +M src/externals/CMake/README.md +M src/externals/CMake/pFUnit_utils.cmake +M src/externals/pio1/pio/CMakeLists.txt +M src/externals/pio1/pio/pio.F90 +M src/externals/pio1/pio/piolib_mod.F90 +M src/share/unit_test_stubs/pio/pio.F90.in +M src/share/util/shr_pio_mod.F90 +D tools/unit_testing/Examples/circle_area/src/CMakeLists.txt +D tools/unit_testing/Examples/circle_area/src/circle.F90 +D tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt +D tools/unit_testing/Examples/circle_area/tests/CTest/CMakeLists.txt +D tools/unit_testing/Examples/circle_area/tests/CTest/test_driver.F90 +D tools/unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +D tools/unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf +D tools/unit_testing/Examples/interpolate_1d/src/CMakeLists.txt +D tools/unit_testing/Examples/interpolate_1d/src/interpolate_1d.F90 +D tools/unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt +D tools/unit_testing/Examples/interpolate_1d/tests/CTest/CMakeLists.txt +D tools/unit_testing/Examples/interpolate_1d/tests/CTest/test_driver.F90 +D tools/unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt +D tools/unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf +D tools/unit_testing/Examples/test_list.xml +D tools/unit_testing/README +D 
tools/unit_testing/python/.gitignore +D tools/unit_testing/python/comparable.py +D tools/unit_testing/python/environment.py +D tools/unit_testing/python/machine_setup.py +D tools/unit_testing/python/printer.py +D tools/unit_testing/python/test_environment.py +D tools/unit_testing/python/test_xml_test_list.py +D tools/unit_testing/python/test_xml_utils.py +D tools/unit_testing/python/xml_test_list.py +D tools/unit_testing/python/xml_utils.py +D tools/unit_testing/run_tests.py +M utils/perl5lib/Build/Namelist.pm +====================================================================== + +====================================================================== + Originator: Chris Fischer Date: 4-14-2017 Tag: cime5.3.0-alpha.07 From 50b30d1ebb9c7ccf716bce226684f228e7e81dd8 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Fri, 28 Apr 2017 11:04:27 -0600 Subject: [PATCH 165/219] _parse_namelist_group was Incorrect When groupless was False the '+=' operator did not work in _parse_namelist_group. I have added a unit test for this case and changed the logic of the code to handle this case. Test suite: Test baseline: python -m doctest CIME/namelist.py Test namelist changes: '+=' support Test status: bit for bit Fixes #839 User interface changes?: '+=' support in user_nl_* Code review: @billsacks --- scripts/lib/CIME/namelist.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index 795fce7187a4..1266d9e4bfd1 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -2094,6 +2094,10 @@ def _parse_namelist_group(self): >>> x._parse_namelist_group() >>> x._settings OrderedDict([(u'foo', [u"'bar'", u"'baz'"])]) + >>> x = _NamelistParser("&group foo='bar', foo+='baz' /") + >>> x._parse_namelist_group() + >>> x._settings + OrderedDict([(u'group', {u'foo': [u"'bar'", u"'baz'"]})]) """ group_name = self._parse_namelist_group_name() if not self._groupless: @@ -2118,7 +2122,10 @@ def _parse_namelist_group(self): group = self._settings[group_name] if name in group: dsettings = group[name] - values = merge_literal_lists(dsettings, values) + if addto: + values = group[name] + values + if not addto: + values = merge_literal_lists(dsettings, values) group[name] = values def parse_namelist(self): From ea2be8860086f7f31de9b443d55bba467d87eaa9 Mon Sep 17 00:00:00 2001 From: Erich L Foster Date: Fri, 28 Apr 2017 11:10:47 -0600 Subject: [PATCH 166/219] Added an Additional Test to _preview_namelist_group There was no test in _preview_namelist_group which test the case foo+=bar without anything being set in foo before. 
Test suite: python -m doctest CIME/namelist.py Test baseline: Test namelist changes: '+=' support Test status: bit for bit Fixes #839 User interface changes?: '+=' support in user_nl_* Code review: @billsacks --- scripts/lib/CIME/namelist.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index 1266d9e4bfd1..06ef8684eb49 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -2098,6 +2098,10 @@ def _parse_namelist_group(self): >>> x._parse_namelist_group() >>> x._settings OrderedDict([(u'group', {u'foo': [u"'bar'", u"'baz'"]})]) + >>> x = _NamelistParser("&group foo+='bar' /") + >>> x._parse_namelist_group() + >>> x._settings + OrderedDict([(u'group', {u'foo': [u"'bar'"]})]) """ group_name = self._parse_namelist_group_name() if not self._groupless: From d3cf3f2330f12a21ac4e9a830b1952b390b08ea5 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Fri, 28 Apr 2017 11:42:06 -0600 Subject: [PATCH 167/219] Add one more missing unit test --- scripts/lib/CIME/namelist.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index 06ef8684eb49..b43098abbb7d 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -2094,6 +2094,10 @@ def _parse_namelist_group(self): >>> x._parse_namelist_group() >>> x._settings OrderedDict([(u'foo', [u"'bar'", u"'baz'"])]) + >>> x = _NamelistParser("&group foo+='bar' /", groupless=True) + >>> x._parse_namelist_group() + >>> x._settings + OrderedDict([(u'foo', [u"'bar'"])]) >>> x = _NamelistParser("&group foo='bar', foo+='baz' /") >>> x._parse_namelist_group() >>> x._settings From 035d372899e91a901f751e7fe212833333707d1f Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 28 Apr 2017 12:42:08 -0700 Subject: [PATCH 168/219] improve error message when config_files.xml is not found --- scripts/lib/CIME/XML/files.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/lib/CIME/XML/files.py b/scripts/lib/CIME/XML/files.py index 61b2894b579d..a66721c6dfd4 100644 --- a/scripts/lib/CIME/XML/files.py +++ b/scripts/lib/CIME/XML/files.py @@ -20,6 +20,7 @@ def __init__(self): """ cimeroot = get_cime_root() infile = os.path.join(cimeroot, "config", get_model(), "config_files.xml") + expect(os.path.isfile(infile), "Could not find or open file %s"%infile) schema = os.path.join(cimeroot, "config", "xml_schemas", "entry_id.xsd") EntryID.__init__(self, infile, schema=schema) From e36efd291d3978eef46cca38e4a31ac2409162d8 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 28 Apr 2017 15:13:15 -0600 Subject: [PATCH 169/219] cleanup pio defaults --- scripts/Tools/case.setup | 11 +++-------- scripts/lib/CIME/SystemTests/seq.py | 4 +--- scripts/lib/CIME/case_setup.py | 25 +++---------------------- src/share/util/shr_pio_mod.F90 | 10 +++------- 4 files changed, 10 insertions(+), 40 deletions(-) diff --git a/scripts/Tools/case.setup b/scripts/Tools/case.setup index ea60b98b1daa..3aca10bb130d 100755 --- a/scripts/Tools/case.setup +++ b/scripts/Tools/case.setup @@ -45,16 +45,11 @@ OR parser.add_argument("-r", "--reset", action="store_true", help="Does a clean followed by setup") - parser.add_argument("--no-adjust-pio", action="store_true", - help="Do NOT adjust pio settings for new pelayout." - "By default if the pelayout is changed the pio layout will be adjusted." 
- "This option overrides that feature and leaves the pio layout as currently set.") - args = parser.parse_args(args[1:]) CIME.utils.handle_standard_logging_options(args) - return args.caseroot, args.clean, args.test_mode, args.reset, not args.no_adjust_pio + return args.caseroot, args.clean, args.test_mode, args.reset ############################################################################### def _main_func(description): @@ -63,9 +58,9 @@ def _main_func(description): test_results = doctest.testmod(verbose=True) sys.exit(1 if test_results.failed > 0 else 0) - caseroot, clean, test_mode, reset, adjust_pio = parse_command_line(sys.argv, description) + caseroot, clean, test_mode, reset = parse_command_line(sys.argv, description) with Case(caseroot, read_only=False) as case: - case_setup(case, clean=clean, test_mode=test_mode, reset=reset, adjust_pio=adjust_pio) + case_setup(case, clean=clean, test_mode=test_mode, reset=reset) if __name__ == "__main__": _main_func(__doc__) diff --git a/scripts/lib/CIME/SystemTests/seq.py b/scripts/lib/CIME/SystemTests/seq.py index c53eb12a8643..3b6fd9477a8f 100644 --- a/scripts/lib/CIME/SystemTests/seq.py +++ b/scripts/lib/CIME/SystemTests/seq.py @@ -3,7 +3,7 @@ """ from CIME.XML.standard_module_setup import * from CIME.SystemTests.system_tests_common import SystemTestsCommon -from CIME.case_setup import case_setup, adjust_pio_layout +from CIME.case_setup import case_setup from CIME.check_lockedfiles import * import shutil @@ -90,7 +90,6 @@ def run_phase(self): # update the pelayout settings for this run self._case.read_xml() - adjust_pio_layout(self._case, self._case.get_value("PES_PER_NODE")) self.run_indv() @@ -103,7 +102,6 @@ def run_phase(self): logger.info("doing a second %d %s test with rootpes set to zero" % (stop_n, stop_option)) # update the pelayout settings for this run self._case.read_xml() - adjust_pio_layout(self._case, self._case.get_value("PES_PER_NODE")) self.run_indv(suffix="seq") self._component_compare_test("base", "seq") diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index 4de5b93277f6..d6313c551fd5 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -59,7 +59,7 @@ def _build_usernl_files(case, model, comp): shutil.copy(model_nl, nlfile) ############################################################################### -def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, adjust_pio=True): +def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False): ############################################################################### os.chdir(caseroot) @@ -163,8 +163,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, # Make sure pio settings are consistent tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count) - if adjust_pio: - adjust_pio_layout(case, tasks_per_node) case.initialize_derived_attributes() @@ -221,29 +219,12 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, env_module.make_env_mach_specific_file(compiler, debug, mpilib, "csh") env_module.save_all_env_info("software_environment.txt") -def adjust_pio_layout(case, new_pio_stride): - - models = case.get_values("COMP_CLASSES") - for comp in models: - pio_stride = case.get_value("PIO_STRIDE_%s"%comp) - pio_numtasks = case.get_value("PIO_NUMTASKS_%s"%comp) - ntasks = case.get_value("NTASKS_%s"%comp) - new_stride = min(ntasks, new_pio_stride) - new_numtasks = max(1, ntasks//new_stride) - if 
pio_stride != new_stride: - logger.info("Resetting PIO_STRIDE_%s to %s"%(comp, new_stride)) - case.set_value("PIO_STRIDE_%s"%comp, new_stride) - if pio_numtasks != new_numtasks: - logger.info("Resetting PIO_NUMTASKS_%s to %s"%(comp, new_numtasks)) - case.set_value("PIO_NUMTASKS_%s"%comp, new_numtasks) - - ############################################################################### -def case_setup(case, clean=False, test_mode=False, reset=False, adjust_pio=True): +def case_setup(case, clean=False, test_mode=False, reset=False): ############################################################################### caseroot, casebaseid = case.get_value("CASEROOT"), case.get_value("CASEBASEID") phase = "setup.clean" if clean else "case.setup" - functor = lambda: _case_setup_impl(case, caseroot, clean, test_mode, reset, adjust_pio) + functor = lambda: _case_setup_impl(case, caseroot, clean, test_mode, reset) if case.get_value("TEST") and not test_mode: test_name = casebaseid if casebaseid is not None else case.get_value("CASE") diff --git a/src/share/util/shr_pio_mod.F90 b/src/share/util/shr_pio_mod.F90 index d4aa4b0dcff5..27894e9e73b6 100644 --- a/src/share/util/shr_pio_mod.F90 +++ b/src/share/util/shr_pio_mod.F90 @@ -394,9 +394,9 @@ subroutine shr_pio_read_default_namelist(nlfilename, Comm, pio_stride, pio_root, integer :: iam, ierr, npes, unitn logical :: iamroot - namelist /pio_default_inparm/ pio_stride, pio_root, pio_numiotasks, & - pio_typename, pio_async_interface, pio_debug_level, pio_blocksize, & - pio_buffer_size_limit, pio_rearranger, & + namelist /pio_default_inparm/ & + pio_async_interface, pio_debug_level, pio_blocksize, & + pio_buffer_size_limit, & pio_rearr_comm_type, pio_rearr_comm_fcd, & pio_rearr_comm_max_pend_req_comp2io, pio_rearr_comm_enable_hs_comp2io, & pio_rearr_comm_enable_isend_comp2io, & @@ -437,10 +437,6 @@ subroutine shr_pio_read_default_namelist(nlfilename, Comm, pio_stride, pio_root, pio_rearr_comm_enable_hs_io2comp = .true. pio_rearr_comm_enable_isend_io2comp = .false. 
- - - - if(iamroot) then unitn=shr_file_getunit() open( unitn, file=trim(nlfilename), status='old' , iostat=ierr) From c39a54c66594d006819cec20aa0583a5114176eb Mon Sep 17 00:00:00 2001 From: John Truesdale Date: Fri, 28 Apr 2017 15:24:14 -0600 Subject: [PATCH 170/219] add new gx1 degree maps --- config/cesm/config_grids.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml index a709e2d5338f..ab022a0580bb 100644 --- a/config/cesm/config_grids.xml +++ b/config/cesm/config_grids.xml @@ -1628,16 +1628,16 @@ - cpl/gridmaps/rx1/map_rx1_to_gx3v7_e1000r500_161214.nc - cpl/gridmaps/rx1/map_rx1_to_gx3v7_e1000r500_161214.nc + cpl/gridmaps/rx1/map_rx1_to_gx3v7_nnsm_e1000r500_170413.nc + cpl/gridmaps/rx1/map_rx1_to_gx3v7_nnsm_e1000r500_170413.nc cpl/gridmaps/rx1/map_rx1_to_gx1v6_nn_ac_161213.nc cpl/gridmaps/rx1/map_rx1_to_gx1v6_e1000r300_161212.nc - cpl/gridmaps/rx1/map_rx1_to_gx1v7_nn_ac_161213.nc - cpl/gridmaps/rx1/map_rx1_to_gx1v7_e1000r300_161213.nc + cpl/gridmaps/rx1/map_rx1_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_170413.nc + cpl/gridmaps/rx1/map_rx1_to_gx1v7_nnsm_e1000r300_170413.nc cpl/gridmaps/rx1/map_rx1_to_tx1v1_e1000r300_161214.nc @@ -1653,16 +1653,16 @@ - cpl/gridmaps/r05/map_r05_to_gx3v7_e1000r500_161214.nc - cpl/gridmaps/r05/map_r05_to_gx3v7_e1000r500_161214.nc + cpl/gridmaps/r05/map_r05_to_gx3v7_nnsm_e1000r500_170413.nc + cpl/gridmaps/r05/map_r05_to_gx3v7_nnsm_e1000r500_170413.nc cpl/gridmaps/r05/map_r05_to_gx1v6_nn_ac_161214.nc cpl/gridmaps/r05/map_r05_to_gx1v6_e1000r300_161212.nc - cpl/gridmaps/r05/map_r05_to_gx1v7_nn_ac_161213.nc - cpl/gridmaps/r05/map_r05_to_gx1v7_e1000r300_161213.nc + cpl/gridmaps/r05/map_r05_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_170413.nc + cpl/gridmaps/r05/map_r05_to_gx1v7_nnsm_e1000r300_170413.nc cpl/gridmaps/r05/map_r05_to_tx1v1_e1000r500_161214.nc From 6d1aef12c3526763fe9fd0cc38d88759c4b39e9a Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 28 Apr 2017 15:56:21 -0600 Subject: [PATCH 171/219] fix pylint issues --- scripts/Tools/preview_namelists | 1 - scripts/lib/CIME/XML/env_batch.py | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/scripts/Tools/preview_namelists b/scripts/Tools/preview_namelists index da9d33b43c0b..2d3f8efcbf01 100755 --- a/scripts/Tools/preview_namelists +++ b/scripts/Tools/preview_namelists @@ -17,7 +17,6 @@ import argparse, doctest ############################################################################### def parse_command_line(args, description): ############################################################################### - cime_model = CIME.utils.get_model() parser = argparse.ArgumentParser(description=description) CIME.utils.setup_standard_logging_options(parser) diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index 724c48143f92..68ba5fde7b21 100644 --- a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -296,12 +296,9 @@ def submit_jobs(self, case, no_batch=False, job=None, batch_args=None, dry_run=F continue try: prereq = self.get_value('prereq', subgroup=job, resolved=False) - if prereq is None or job == firstjob: + if prereq is None or job == firstjob or (dry_run and prereq == "$BUILD_COMPLETE"): prereq = True else: - if dry_run: - # Assume build is complete - prereq = prereq.replace("$BUILD_COMPLETE", "True") prereq = case.get_resolved_value(prereq) prereq = eval(prereq) except: From 
65aa0774452fdd640cfd9b609378cdca475529b6 Mon Sep 17 00:00:00 2001 From: Bill Sacks Date: Fri, 28 Apr 2017 20:30:09 -0600 Subject: [PATCH 172/219] Pick up multi-instance cprnc.out files in summarize_cprnc_diffs --- tools/cprnc/summarize_cprnc_diffs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/cprnc/summarize_cprnc_diffs b/tools/cprnc/summarize_cprnc_diffs index 1d6b5cf8d224..8adaf87b75fd 100755 --- a/tools/cprnc/summarize_cprnc_diffs +++ b/tools/cprnc/summarize_cprnc_diffs @@ -105,7 +105,10 @@ SYNOPSIS it looks for files of the form *.nc.cprnc.out.SUFFIX. (With this naming convention [i.e., looking for files of the form *.nc.cprnc.out], note that it only looks at output for baseline comparisons - NOT output from the test - itself, such as cprnc output files from the exact restart test.) + itself, such as cprnc output files from the exact restart test.) (Actually, + we also allow for files of the form *.nc_[0-9][0-9][0-9][0-9].cprnc.out, + such as *.nc_0001.cprnc.out and *.nc_0002.cprnc.out, to pick up + multi-instance files.) Summaries of cprnc differences (RMS and normalized RMS differences, FILLDIFFs and DIMSIZEDIFFs) are placed in three output files beginning with the name 'cprnc.summary', in @@ -148,10 +151,10 @@ sub process_cprnc_output { my @cprnc_files; if ($output_suffix) { - @cprnc_files = glob "${test_dir}/run/*.nc.cprnc.out.${output_suffix}"; + @cprnc_files = glob "${test_dir}/run/*.nc.cprnc.out.${output_suffix} ${test_dir}/run/*.nc_[0-9][0-9][0-9][0-9].cprnc.out.${output_suffix}"; } else { - @cprnc_files = glob "${test_dir}/run/*.nc.cprnc.out"; + @cprnc_files = glob "${test_dir}/run/*.nc.cprnc.out ${test_dir}/run/*.nc_[0-9][0-9][0-9][0-9].cprnc.out"; } foreach my $cprnc_file (@cprnc_files) { From 65d2b11a21b04295b4ba53d335244e0fd8bcfe1f Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Sun, 30 Apr 2017 14:05:34 -0600 Subject: [PATCH 173/219] fix erp test build --- scripts/lib/CIME/SystemTests/erp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/SystemTests/erp.py b/scripts/lib/CIME/SystemTests/erp.py index 9bade606e290..851b3a676b9b 100644 --- a/scripts/lib/CIME/SystemTests/erp.py +++ b/scripts/lib/CIME/SystemTests/erp.py @@ -56,9 +56,9 @@ def build_phase(self, sharedlib_only=False, model_only=False): # The reason we currently need two executables that CESM-CICE has a compile time decomposition # For cases where ERP works, changing this decomposition will not affect answers, but it will # affect the executable that is used - self._case.set_value("SMP_BUILD","0") for bld in range(1,3): logging.warn("Starting bld %s"%bld) + self._case.set_value("BUILD_THREADED",True) if (bld == 2): # halve the number of tasks and threads From 8cdfc543573649e6120856490fe935a74d25bba6 Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Sun, 30 Apr 2017 19:48:24 -0600 Subject: [PATCH 174/219] Update ChangeLog --- ChangeLog | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/ChangeLog b/ChangeLog index efbc7df40c25..89037b262716 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,47 @@ + +====================================================================== + +Originator: Chris Fischer +Date: 4-30-2017 +Tag: cime5.3.0-alpha.09 +Answer Changes: None +Tests: ERP_Ln9.f09_f09.F1850_DONOTUSE.yellowstone_intel.cam-outfrq9s_clm5 + scripts_regression_tests.py --fast, hand test xmlchange and xmlquery +Dependencies: + +Brief Summary: + - Fix erp test build. + - Add Support for '+=' in Namelists. 
+ - Add --component Option to preview_namelist. + - Add --file option to xmlquery and enable option in xmlchange. + +User interface changes: + - Added support for '+=' to namelist parser. + - Added --component option to preview_namelist. + - New --file option in xmlquery. + +PR summary: git log --oneline --first-parent [previous_tag]..master +6d8b24b Merge pull request #1444 from jedwards4b/erp_test_fix +84b7bde Merge pull request #1389 from ESMCI/erichlf/namelist/plusEqual +45af658 Merge pull request #1436 from ESMCI/erichlf/preview_namelists/component +c4d4437 Merge pull request #1419 from jedwards4b/xmlcq_fileoption + + +Modified files: git diff --name-status [previous_tag] + +M scripts/Tools/preview_namelists +M scripts/Tools/xmlchange +M scripts/Tools/xmlquery +M scripts/lib/CIME/SystemTests/erp.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/env_test.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/case.py +M scripts/lib/CIME/namelist.py +M scripts/lib/CIME/preview_namelists.py + +====================================================================== + ====================================================================== Originator: Chris Fischer From f694dadc6b37575a521873fdcd4ab1e0874b9559 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 1 May 2017 07:53:34 -0600 Subject: [PATCH 175/219] set build threaded before shared lib build --- scripts/lib/CIME/SystemTests/erp.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/lib/CIME/SystemTests/erp.py b/scripts/lib/CIME/SystemTests/erp.py index 851b3a676b9b..5e0dc748b453 100644 --- a/scripts/lib/CIME/SystemTests/erp.py +++ b/scripts/lib/CIME/SystemTests/erp.py @@ -31,6 +31,7 @@ def build_phase(self, sharedlib_only=False, model_only=False): and tasks. This test will fail for components (e.g. pop) that do not reproduce exactly with different numbers of mpi tasks. """ + self._case.set_value("BUILD_THREADED",True) if sharedlib_only: return self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) @@ -58,7 +59,6 @@ def build_phase(self, sharedlib_only=False, model_only=False): # affect the executable that is used for bld in range(1,3): logging.warn("Starting bld %s"%bld) - self._case.set_value("BUILD_THREADED",True) if (bld == 2): # halve the number of tasks and threads @@ -79,7 +79,6 @@ def build_phase(self, sharedlib_only=False, model_only=False): case_setup(self._case, test_mode=True, reset=True) # Now rebuild the system, given updated information in env_build.xml - self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) shutil.move("%s/%s.exe"%(exeroot,cime_model), "%s/%s.ERP%s.exe"%(exeroot,cime_model,bld)) From 5dcb9acfc981677047473e4926f0f227c47c9b50 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 1 May 2017 08:45:56 -0600 Subject: [PATCH 176/219] changes needed for nag to use erp test --- config/cesm/machines/config_compilers.xml | 5 ----- scripts/lib/CIME/case_setup.py | 2 -- 2 files changed, 7 deletions(-) diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index e33594c56ef2..0bfcde185360 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -283,7 +283,6 @@ using a fortran linker. -C=all -g -time -f2003 -ieee=stop -gline - -openmp -g -time -f2003 -ieee=stop -gline - -openmp -fixed @@ -307,9 +305,6 @@ using a fortran linker. 
-free FALSE - - -openmp - mpicc mpif90 gcc diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index 4de5b93277f6..2de2b75a543c 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -156,8 +156,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, case.set_value("TOTALPES", pestot) thread_count = env_mach_pes.get_max_thread_count(models) build_threaded = case.get_build_threaded() - expect(not (build_threaded and compiler == "nag"), - "it is not possible to run with OpenMP if using the NAG Fortran compiler") cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value("MACH")) case.set_value("COST_PES", cost_pes) From fe9b2ea28aa91fc54118f9b9af5a40606ee8ee14 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 1 May 2017 08:59:06 -0600 Subject: [PATCH 177/219] fix pylint issues --- scripts/Tools/preview_namelists | 1 - scripts/lib/CIME/namelist.py | 1 - 2 files changed, 2 deletions(-) diff --git a/scripts/Tools/preview_namelists b/scripts/Tools/preview_namelists index da9d33b43c0b..2d3f8efcbf01 100755 --- a/scripts/Tools/preview_namelists +++ b/scripts/Tools/preview_namelists @@ -17,7 +17,6 @@ import argparse, doctest ############################################################################### def parse_command_line(args, description): ############################################################################### - cime_model = CIME.utils.get_model() parser = argparse.ArgumentParser(description=description) CIME.utils.setup_standard_logging_options(parser) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index b43098abbb7d..5e2ed18ab4db 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -1006,7 +1006,6 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): group_name = group_name.lower() minindex, maxindex, step = get_fortran_variable_indices(variable_name, var_size) - original_var = variable_name variable_name = get_fortran_name_only(variable_name.lower()) expect(minindex > 0, "Indices < 1 not supported in CIME interface to fortran namelists... lower bound=%s"%minindex) From 8ae6c5a4ea813320ea14e39eb1245fb30eefef1b Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 1 May 2017 10:43:11 -0600 Subject: [PATCH 178/219] Fix pylint error --- scripts/lib/CIME/namelist.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index b43098abbb7d..5e2ed18ab4db 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -1006,7 +1006,6 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): group_name = group_name.lower() minindex, maxindex, step = get_fortran_variable_indices(variable_name, var_size) - original_var = variable_name variable_name = get_fortran_name_only(variable_name.lower()) expect(minindex > 0, "Indices < 1 not supported in CIME interface to fortran namelists... lower bound=%s"%minindex) From ae2a5ca07669e22274599ac366f6632f10e3b5ef Mon Sep 17 00:00:00 2001 From: Andy Salinger Date: Wed, 12 Apr 2017 15:39:58 -0600 Subject: [PATCH 179/219] Add Sandia Corporation to CIME LICENSE.TXT Per earlier discussions, DOE labs whose ACME staff contributed to CIME 5 are going to assert their copyright as co-authors. Sandia copyright came through. I expect Argonne to follow at some point. I just added Sandia alongside UCAR in the main LICENSE.TXT file. 
The specific BSD license was fine, so I did not repeat it, just added Sandia banner after UCAR. --- LICENSE.TXT | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/LICENSE.TXT b/LICENSE.TXT index d832be2be936..6d0ae1a9a2e4 100644 --- a/LICENSE.TXT +++ b/LICENSE.TXT @@ -1,9 +1,15 @@ Copyright (c) 2015, University Corporation for Atmospheric Research (UCAR) All rights reserved. + and +Copyright (c) 2017, Sandia Corporation. +Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +the U.S. Government retains certain rights in this software. Developed by: University Corporation for Atmospheric Research - National Center for Atmospheric Research https://www2.cesm.ucar.edu/working-groups/sewg + and + DOE BER ACME project team members, including those at SNL and ANL Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), @@ -17,7 +23,7 @@ the Software is furnished to do so, subject to the following conditions: - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. - - Neither the names of [Name of Development Group, UCAR], + - Neither the names of UCAR or Sandia Corporation, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. From b254a24b50489ea923cb2d663bfaa8a4134c2454 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 1 May 2017 10:29:18 -0600 Subject: [PATCH 180/219] update to version pio2.2.1 --- src/externals/pio2/CMakeLists.txt | 63 +- src/externals/pio2/CTestScript.cmake | 25 +- .../CTestEnvironment-anlworkstation.cmake | 5 + .../pio2/ctest/CTestEnvironment-nwsc.cmake | 2 +- .../pio2/ctest/runcdash-cgd-gnu-openmpi.sh | 38 + src/externals/pio2/ctest/runcdash-cgd-nag.sh | 2 +- .../ctest/runcdash-nwsc-intel-mpiserial.sh | 2 +- .../pio2/ctest/runcdash-nwscla-intel.sh | 10 +- src/externals/pio2/ctest/runctest-cgd.sh | 2 +- src/externals/pio2/ctest/runctest-nwscla.sh | 19 +- .../pio2/doc/source/contributing_code.txt | 2 +- src/externals/pio2/examples/c/CMakeLists.txt | 10 + src/externals/pio2/examples/c/darray_async.c | 387 ++++ .../pio2/examples/c/darray_no_async.c | 358 ++++ src/externals/pio2/src/clib/CMakeLists.txt | 13 +- src/externals/pio2/src/clib/bget.c | 8 +- src/externals/pio2/src/clib/config.h.in | 3 + src/externals/pio2/src/clib/pio.h | 111 +- src/externals/pio2/src/clib/pio_darray.c | 484 ++--- src/externals/pio2/src/clib/pio_darray_int.c | 619 ++++--- src/externals/pio2/src/clib/pio_file.c | 18 +- src/externals/pio2/src/clib/pio_get_nc.c | 26 +- src/externals/pio2/src/clib/pio_getput_int.c | 517 +++--- src/externals/pio2/src/clib/pio_internal.h | 96 +- src/externals/pio2/src/clib/pio_lists.c | 6 + src/externals/pio2/src/clib/pio_msg.c | 117 +- src/externals/pio2/src/clib/pio_nc.c | 87 +- src/externals/pio2/src/clib/pio_nc4.c | 24 +- src/externals/pio2/src/clib/pio_put_nc.c | 26 +- src/externals/pio2/src/clib/pio_rearrange.c | 1599 ++++++++++------- src/externals/pio2/src/clib/pio_spmd.c | 63 +- src/externals/pio2/src/clib/pio_varm.c | 78 +- src/externals/pio2/src/clib/pioc.c | 602 ++++--- src/externals/pio2/src/clib/pioc_sc.c | 219 ++- src/externals/pio2/src/clib/pioc_support.c | 444 ++--- src/externals/pio2/tests/cunit/CMakeLists.txt | 32 +- src/externals/pio2/tests/cunit/pio_tests.h | 3 + 
.../pio2/tests/cunit/test_async_2comp.c | 132 -- .../pio2/tests/cunit/test_async_3proc.c | 6 +- .../pio2/tests/cunit/test_async_4proc.c | 11 +- .../pio2/tests/cunit/test_async_simple.c | 26 +- .../pio2/tests/cunit/test_darray_1d.c | 303 +++- .../pio2/tests/cunit/test_darray_3d.c | 5 +- .../pio2/tests/cunit/test_darray_multi.c | 473 +++++ .../pio2/tests/cunit/test_darray_multivar.c | 407 +++-- .../pio2/tests/cunit/test_darray_multivar2.c | 287 +++ .../pio2/tests/cunit/test_decomp_uneven.c | 380 ++++ src/externals/pio2/tests/cunit/test_decomps.c | 3 +- .../pio2/tests/cunit/test_intercomm2.c | 9 +- .../pio2/tests/cunit/test_iosystem2_simple.c | 2 + src/externals/pio2/tests/cunit/test_pioc.c | 403 ++++- .../pio2/tests/cunit/test_pioc_fill.c | 6 +- src/externals/pio2/tests/cunit/test_rearr.c | 1542 ++++++++++++++++ src/externals/pio2/tests/cunit/test_shared.c | 17 +- src/externals/pio2/tests/cunit/test_spmd.c | 381 ++-- .../pio2/tests/general/ncdf_get_put.F90.in | 300 ++++ .../general/pio_decomp_frame_tests.F90.in | 193 ++ .../pio2/tests/general/util/pio_tutil.F90 | 2 +- 58 files changed, 8096 insertions(+), 2912 deletions(-) create mode 100755 src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh create mode 100644 src/externals/pio2/examples/c/darray_async.c create mode 100644 src/externals/pio2/examples/c/darray_no_async.c delete mode 100644 src/externals/pio2/tests/cunit/test_async_2comp.c create mode 100644 src/externals/pio2/tests/cunit/test_darray_multi.c create mode 100644 src/externals/pio2/tests/cunit/test_darray_multivar2.c create mode 100644 src/externals/pio2/tests/cunit/test_decomp_uneven.c create mode 100644 src/externals/pio2/tests/cunit/test_rearr.c diff --git a/src/externals/pio2/CMakeLists.txt b/src/externals/pio2/CMakeLists.txt index 6677ab46eb09..514384a5a9e6 100644 --- a/src/externals/pio2/CMakeLists.txt +++ b/src/externals/pio2/CMakeLists.txt @@ -46,29 +46,6 @@ else() set(USE_MPI_SERIAL 0) endif() -#===== Library Variables ===== -set (PIO_FILESYSTEM_HINTS IGNORE CACHE STRING "Filesystem hints (lustre or gpfs)") - -#===== Testing Options ===== -option (PIO_ENABLE_TESTS "Enable the testing builds" ON) -option (PIO_VALGRIND_CHECK "Enable memory leak check using valgrind" OFF) - -#============================================================================== -# BACKWARDS COMPATIBILITY -#============================================================================== - -# Old NETCDF_DIR variable --> NetCDF_PATH -if (DEFINED NETCDF_DIR) - set (NetCDF_PATH ${NETCDF_DIR} - CACHE STRING "Location of the NetCDF library installation") -endif () - -# Old PNETCDF_DIR variable --> PnetCDF_PATH -if (DEFINED PNETCDF_DIR) - set (PnetCDF_PATH ${PNETCDF_DIR} - CACHE STRING "Location of the PnetCDF library installation") -endif () - #============================================================================== # PREPEND TO CMAKE MODULE PATH #============================================================================== @@ -97,6 +74,46 @@ set (USER_CMAKE_MODULE_PATH ${USER_CMAKE_MODULE_PATH} CACHE STRING "Location of the CMake_Fortran_utils") list (APPEND CMAKE_MODULE_PATH ${USER_CMAKE_MODULE_PATH}) +INCLUDE (CheckTypeSize) + +#===== MPI ===== +if (PIO_USE_MPISERIAL) + find_package (MPISERIAL COMPONENTS C REQUIRED) + if (MPISERIAL_C_FOUND) + set (CMAKE_REQUIRED_INCLUDES ${MPISERIAL_C_INCLUDE_DIRS}) + endif () +else () + find_package (MPI REQUIRED) + set (CMAKE_REQUIRED_INCLUDES ${MPI_INCLUDE_PATH}) +endif () + +SET(CMAKE_EXTRA_INCLUDE_FILES "mpi.h") +check_type_size("MPI_Offset" 
SIZEOF_MPI_OFFSET) +SET(CMAKE_EXTRA_INCLUDE_FILES) + +#===== Library Variables ===== +set (PIO_FILESYSTEM_HINTS IGNORE CACHE STRING "Filesystem hints (lustre or gpfs)") + +#===== Testing Options ===== +option (PIO_ENABLE_TESTS "Enable the testing builds" ON) +option (PIO_VALGRIND_CHECK "Enable memory leak check using valgrind" OFF) + +#============================================================================== +# BACKWARDS COMPATIBILITY +#============================================================================== + +# Old NETCDF_DIR variable --> NetCDF_PATH +if (DEFINED NETCDF_DIR) + set (NetCDF_PATH ${NETCDF_DIR} + CACHE STRING "Location of the NetCDF library installation") +endif () + +# Old PNETCDF_DIR variable --> PnetCDF_PATH +if (DEFINED PNETCDF_DIR) + set (PnetCDF_PATH ${PNETCDF_DIR} + CACHE STRING "Location of the PnetCDF library installation") +endif () + #============================================================================== # HELPFUL GLOBAL VARIABLES #============================================================================== diff --git a/src/externals/pio2/CTestScript.cmake b/src/externals/pio2/CTestScript.cmake index 1d0de3ccdcdd..e817f422d158 100644 --- a/src/externals/pio2/CTestScript.cmake +++ b/src/externals/pio2/CTestScript.cmake @@ -18,7 +18,7 @@ else () set (CTEST_DASHBOARD_ROOT "$ENV{HOME}/pio-dashboard") endif () -## -- Compiler ID +## -- Compiler ID if (DEFINED ENV{PIO_COMPILER_ID}) set (compid "$ENV{PIO_COMPILER_ID}") else () @@ -49,7 +49,8 @@ if (HOSTNAME MATCHES "^yslogin" OR HOSTNAME MATCHES "^pronghorn") set (HOSTNAME_ID "nwsc") # New UCAR/NWSC SGI Machines -elseif (HOSTNAME MATCHES "^laramie") +elseif (HOSTNAME MATCHES "^laramie" OR + HOSTNAME MATCHES "^chadmin") set (HOSTNAME_ID "nwscla") # ALCF/Argonne Machines elseif (HOSTNAME MATCHES "^mira" OR @@ -109,9 +110,9 @@ find_program (CTEST_GIT_COMMAND NAMES git) ## -- make command find_program (MAKE NAMES make) -#----------------------------------------------------------- +#----------------------------------------------------------- #-- Generate build-specific information -#----------------------------------------------------------- +#----------------------------------------------------------- ## -- CTest Site Name @@ -124,25 +125,25 @@ set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}-${compid}") ## -- SRC Dir (where this script exists) set (CTEST_SOURCE_DIRECTORY "${CTEST_SCRIPT_DIRECTORY}") -## -- BIN Dir +## -- BIN Dir set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/build-${CTEST_BUILD_NAME}-${CTEST_BUILD_GROUP}") ## -- Add the CTest script directory to the module path set (CTEST_EXTRA_SCRIPT_PATH "${CTEST_SOURCE_DIRECTORY}/ctest") list (APPEND CMAKE_MODULE_PATH ${CTEST_EXTRA_SCRIPT_PATH}) -# ----------------------------------------------------------- +# ----------------------------------------------------------- # -- Store Build-Specific Info (environment variables) -# ----------------------------------------------------------- +# ----------------------------------------------------------- set (ENV{PIO_DASHBOARD_SITE} ${CTEST_SITE}) set (ENV{PIO_DASHBOARD_BUILD_NAME} ${CTEST_BUILD_NAME}) set (ENV{PIO_DASHBOARD_SOURCE_DIR} ${CTEST_SOURCE_DIRECTORY}) set (ENV{PIO_DASHBOARD_BINARY_DIR} ${CTEST_BINARY_DIRECTORY}) -# ----------------------------------------------------------- +# ----------------------------------------------------------- # -- Run CTest -# ----------------------------------------------------------- +# ----------------------------------------------------------- ## -- Empty the 
binary directory ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY}) @@ -157,7 +158,7 @@ message (" -- Update source - ${CTEST_BUILD_NAME} --") set (CTEST_UPDATE_COMMAND "${CTEST_GIT_COMMAND}") ctest_update () -## -- Configure +## -- Configure message (" -- Configure build - ${CTEST_BUILD_NAME} --") include (CTestEnvironment-${HOSTNAME_ID}) set (CTEST_CONFIGURE_COMMAND "${CMAKE_COMMAND} ${CTEST_CONFIGURE_OPTIONS} ${CTEST_SOURCE_DIRECTORY}") @@ -179,9 +180,9 @@ message (" -- Submit to dashboard - ${CTEST_BUILD_NAME} --") message ("** -- PIO_DASHBOARD_SITE=$ENV{PIO_DASHBOARD_SITE}") ctest_submit () -# ----------------------------------------------------------- +# ----------------------------------------------------------- # -- Clear environment -# ----------------------------------------------------------- +# ----------------------------------------------------------- unset (ENV{PIO_DASHBOARD_SITE}) unset (ENV{PIO_DASHBOARD_BUILD_NAME}) diff --git a/src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake b/src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake index 07ba92a2a726..ddf04f063a6e 100644 --- a/src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake +++ b/src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake @@ -23,3 +23,8 @@ endif () if (DEFINED ENV{VALGRIND_CHECK}) set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_VALGRIND_CHECK=ON") endif () + +# If USE_MALLOC environment variable is set, then use native malloc (instead of bget package) +if (DEFINED ENV{USE_MALLOC}) + set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_USE_MALLOC=ON") +endif () diff --git a/src/externals/pio2/ctest/CTestEnvironment-nwsc.cmake b/src/externals/pio2/ctest/CTestEnvironment-nwsc.cmake index 356390f933d6..4a0d6fd3acd5 100644 --- a/src/externals/pio2/ctest/CTestEnvironment-nwsc.cmake +++ b/src/externals/pio2/ctest/CTestEnvironment-nwsc.cmake @@ -14,5 +14,5 @@ set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE -DPIO_ENABLE_DOC=OFF # If MPISERIAL environment variable is set, then enable MPISERIAL if (DEFINED ENV{MPISERIAL}) - set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_USE_MPISERIAL=ON") + set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_USE_MPISERIAL=ON -DPIO_ENABLE_EXAMPLES=OFF ") endif () diff --git a/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh b/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh new file mode 100755 index 000000000000..56407d9c4251 --- /dev/null +++ b/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh @@ -0,0 +1,38 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module purge +module load compiler/gnu/5.4.0 +module load tool/parallel-netcdf/1.8.1/gnu-5.4.0/openmpi + +export CC=mpicc +export FC=mpif90 +export PIO_DASHBOARD_SITE="cgd" +export PIO_DASHBOARD_ROOT=/scratch/cluster/jedwards/dashboard +export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src +export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} +export PIO_COMPILER_ID=gcc-`gcc --version | head -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +echo "CTEST_SCRIPT_DIRECTORY="${CTEST_SCRIPT_DIRECTORY} +echo "PIO_DASHBOARD_SOURCE_DIR="${PIO_DASHBOARD_SOURCE_DIR} + +if [ ! 
-d src ]; then + git clone --branch develop https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + + +ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-cgd-nag.sh b/src/externals/pio2/ctest/runcdash-cgd-nag.sh index 55c477b73bce..e413186131af 100755 --- a/src/externals/pio2/ctest/runcdash-cgd-nag.sh +++ b/src/externals/pio2/ctest/runcdash-cgd-nag.sh @@ -14,7 +14,7 @@ module load tool/parallel-netcdf/1.7.0/nag/mvapich2 export CC=mpicc export FC=mpif90 export PIO_DASHBOARD_SITE="cgd" -export PIO_DASHBOARD_ROOT=/scratch/cluster/katec/dashboard +export PIO_DASHBOARD_ROOT=/scratch/cluster/jedwards/dashboard export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} export PIO_COMPILER_ID=Nag-6.1-gcc-`gcc --version | head -n 1 | cut -d' ' -f3` diff --git a/src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh b/src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh index 702427e5a7b8..68ac5826be9f 100755 --- a/src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh +++ b/src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh @@ -14,7 +14,7 @@ module load git/2.3.0 module load cmake/3.0.2 module load netcdf/4.3.3.1 -export MPISERIAL=/glade/p/work/katec/installs/intel_15.0.3 +export MPISERIAL=/glade/u/home/jedwards/mpi-serial/intel15.0.3/ export CC=icc export FC=ifort diff --git a/src/externals/pio2/ctest/runcdash-nwscla-intel.sh b/src/externals/pio2/ctest/runcdash-nwscla-intel.sh index 3812b9d61f01..630ac8380859 100755 --- a/src/externals/pio2/ctest/runcdash-nwscla-intel.sh +++ b/src/externals/pio2/ctest/runcdash-nwscla-intel.sh @@ -11,18 +11,18 @@ source /etc/profile.d/modules.sh module reset module unload netcdf -module swap intel intel/16.0.3 -module load git/2.10.0 -module load cmake/3.6.2 -module load netcdf-mpi/4.4.1 +module swap intel intel/17.0.1 +module load cmake/3.7.2 +module load netcdf-mpi/4.4.1.1 module load pnetcdf/1.8.0 +module switch mpt mpt/2.15 echo "MODULE LIST..." module list export CC=mpicc export FC=mpif90 -export PIO_DASHBOARD_ROOT=`pwd`/dashboard +export PIO_DASHBOARD_ROOT=/glade/scratch/jedwards/dashboard export PIO_COMPILER_ID=Intel-`$CC --version | head -n 1 | cut -d' ' -f3` if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then diff --git a/src/externals/pio2/ctest/runctest-cgd.sh b/src/externals/pio2/ctest/runctest-cgd.sh index fccf3396d3b1..bbd31ccf5d0f 100755 --- a/src/externals/pio2/ctest/runctest-cgd.sh +++ b/src/externals/pio2/ctest/runctest-cgd.sh @@ -29,7 +29,7 @@ echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh chmod +x runctest.sh # Submit the job to the queue -jobid=`/usr/local/bin/qsub -l nodes=1:ppn=4 runctest.sh -q short` +jobid=`/usr/local/bin/qsub -l nodes=1:ppn=8 runctest.sh -q short` # Wait for the job to complete before exiting while true; do diff --git a/src/externals/pio2/ctest/runctest-nwscla.sh b/src/externals/pio2/ctest/runctest-nwscla.sh index 6b6f4d87f7c8..d3e252317d05 100755 --- a/src/externals/pio2/ctest/runctest-nwscla.sh +++ b/src/externals/pio2/ctest/runctest-nwscla.sh @@ -1,8 +1,8 @@ #!/bin/sh #============================================================================== # -# This script defines how to run CTest on the NCAR Wyoming Supercomputing -# Center systems (yellowstone/caldera/geyser). +# This script defines how to run CTest on the NCAR Wyoming Supercomputing +# Center systems (cheyenne/laramie). 
# # This assumes the CTest model name (e.g., "Nightly") is passed to it when # run. @@ -19,7 +19,8 @@ model=$2 echo "#!/bin/sh" > runctest.sh echo "#PBS -l walltime=01:00:00" >> runctest.sh echo "#PBS -l select=1:ncpus=8:mpiprocs=8" >> runctest.sh -echo "#PBS -A SCSG0002" >> runctest.sh +echo "#PBS -A P93300606" >> runctest.sh +echo "#PBS -q regular" >> runctest.sh echo "export PIO_DASHBOARD_SITE=nwscla-${HOSTNAME}" >> runctest.sh echo "CTESTCMD=`which ctest`" >> runctest.sh echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh @@ -28,18 +29,16 @@ echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh chmod +x runctest.sh # Submit the job to the queue -jobid=`qsub runctest.sh` +jobid=`qsub -l walltime=01:00:00 runctest.sh` # Wait for the job to complete before exiting while true; do - status=`qstat $jobid` - echo $status - if [ "$status" == "" ]; then - break + qstat $jobid + if [ $? -eq 0 ]; then + sleep 30 else - sleep 10 + break; fi done exit 0 - diff --git a/src/externals/pio2/doc/source/contributing_code.txt b/src/externals/pio2/doc/source/contributing_code.txt index 6dbd70898264..c811777a02b8 100644 --- a/src/externals/pio2/doc/source/contributing_code.txt +++ b/src/externals/pio2/doc/source/contributing_code.txt @@ -13,7 +13,7 @@ * Documents produced by Doxygen are derivative works derived from the * input used in their production; they are not affected by this license. * - */ /*! \page code_style Code Style for Contributors + */ /*! \page contributing_code Guide for Contributors # Introduction # diff --git a/src/externals/pio2/examples/c/CMakeLists.txt b/src/externals/pio2/examples/c/CMakeLists.txt index 74b4837c7f1d..e35562b11cc1 100644 --- a/src/externals/pio2/examples/c/CMakeLists.txt +++ b/src/externals/pio2/examples/c/CMakeLists.txt @@ -33,12 +33,22 @@ ADD_EXECUTABLE(example1 example1.c) TARGET_LINK_LIBRARIES(example1 pioc) add_dependencies(tests example1) +ADD_EXECUTABLE(darray_no_async darray_no_async.c) +TARGET_LINK_LIBRARIES(darray_no_async pioc) +add_dependencies(tests darray_no_async) + +ADD_EXECUTABLE(darray_async darray_async.c) +TARGET_LINK_LIBRARIES(darray_async pioc) +add_dependencies(tests darray_async) + if (PIO_USE_MPISERIAL) add_test(NAME examplePio COMMAND examplePio) add_test(NAME example1 COMMAND example1) else () add_mpi_test(examplePio EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/examplePio NUMPROCS 4 TIMEOUT 60) add_mpi_test(example1 EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/example1 NUMPROCS 4 TIMEOUT 60) + #add_mpi_test(darray_async EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/darray_async NUMPROCS 5 TIMEOUT 60) + add_mpi_test(darray_no_async EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/darray_no_async NUMPROCS 4 TIMEOUT 60) endif () diff --git a/src/externals/pio2/examples/c/darray_async.c b/src/externals/pio2/examples/c/darray_async.c new file mode 100644 index 000000000000..1b15607f0f62 --- /dev/null +++ b/src/externals/pio2/examples/c/darray_async.c @@ -0,0 +1,387 @@ +/* + * @file + * @brief A simple C example for the ParallelIO Library. + * + * This example creates a netCDF output file with three dimensions + * (one unlimited) and one variable. It first writes, then reads the + * sample file using distributed arrays. + * + * This example can be run in parallel for 4 processors. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef TIMING +#include +#endif + +/* The name of this program. 
*/ +#define TEST_NAME "darray_async" + +/* The number of possible output netCDF output flavors available to + * the ParallelIO library. */ +#define NUM_NETCDF_FLAVORS 4 + +/* The number of dimensions in the example data. */ +#define NDIM3 3 + +/* The number of timesteps of data. */ +#define NUM_TIMESTEPS 2 + +/* The length of our sample data in X dimension.*/ +#define DIM_LEN_X 4 + +/* The length of our sample data in Y dimension.*/ +#define DIM_LEN_Y 4 + +/* The name of the variable in the netCDF output file. */ +#define VAR_NAME "foo" + +/* Return code when netCDF output file does not match + * expectations. */ +#define ERR_BAD 1001 + +/* The meaning of life, the universe, and everything. */ +#define START_DATA_VAL 42 + +/* Number of tasks this example runs on. */ +#define TARGET_NTASKS 5 + +/* Logging level. */ +#define LOG_LEVEL 3 + +/* Number of IO processors. */ +#define NUM_IO_TASKS 1 + +/* Number of computation processors. */ +#define NUM_COMP_TASKS 4 + +/* Number of computation components. */ +#define COMPONENT_COUNT 1 + +/* Lengths of dimensions. */ +int dim_len[NDIM3] = {NC_UNLIMITED, DIM_LEN_X, DIM_LEN_Y}; + +/* Names of dimensions. */ +char dim_name[NDIM3][PIO_MAX_NAME + 1] = {"unlimted", "x", "y"}; + +/* Handle MPI errors. This should only be used with MPI library + * function calls. */ +#define MPIERR(e) do { \ + MPI_Error_string(e, err_buffer, &resultlen); \ + printf("MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \ + MPI_Finalize(); \ + return 2; \ + } while (0) + +/* Handle non-MPI errors by finalizing the MPI library and exiting + * with an exit code. */ +#define ERR(e) do { \ + MPI_Finalize(); \ + return e; \ + } while (0) + +/* Global err buffer for MPI. When there is an MPI error, this buffer + * is used to store the error message that is associated with the MPI + * error. */ +char err_buffer[MPI_MAX_ERROR_STRING]; + +/* This is the length of the most recent MPI error message, stored + * int the global error string. */ +int resultlen; + +/* @brief Check the output file. + * + * Use netCDF to check that the output is as expected. + * + * @param ntasks The number of processors running the example. + * @param filename The name of the example file to check. + * + * @return 0 if example file is correct, non-zero otherwise. */ +/* int check_file(int iosysid, int ntasks, char *filename, int iotype, */ +/* int elements_per_pe, int my_rank, int ioid) */ +/* { */ + +/* int ncid; /\* File ID from netCDF. *\/ */ +/* int ndims; /\* Number of dimensions. *\/ */ +/* int nvars; /\* Number of variables. *\/ */ +/* int ngatts; /\* Number of global attributes. *\/ */ +/* int unlimdimid; /\* ID of unlimited dimension. *\/ */ +/* int natts; /\* Number of variable attributes. *\/ */ +/* nc_type xtype; /\* NetCDF data type of this variable. *\/ */ +/* int ret; /\* Return code for function calls. *\/ */ +/* int dimids[NDIM3]; /\* Dimension ids for this variable. *\/ */ +/* char var_name[NC_MAX_NAME]; /\* Name of the variable. *\/ */ +/* /\* size_t start[NDIM3]; /\\* Zero-based index to start read. *\\/ *\/ */ +/* /\* size_t count[NDIM3]; /\\* Number of elements to read. *\\/ *\/ */ +/* /\* int buffer[DIM_LEN_X]; /\\* Buffer to read in data. *\\/ *\/ */ +/* /\* int expected[DIM_LEN_X]; /\\* Data values we expect to find. *\\/ *\/ */ + +/* /\* Open the file. *\/ */ +/* if ((ret = PIOc_openfile_retry(iosysid, &ncid, &iotype, filename, 0, 0))) */ +/* return ret; */ +/* printf("opened file %s ncid = %d\n", filename, ncid); */ + +/* /\* Check the metadata. 
*\/ */ +/* if ((ret = PIOc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid))) */ +/* return ret; */ + +/* /\* Check the dimensions. *\/ */ +/* if (ndims != NDIM3 || nvars != 1 || ngatts != 0 || unlimdimid != 0) */ +/* return ERR_BAD; */ +/* for (int d = 0; d < NDIM3; d++) */ +/* { */ +/* char my_dim_name[NC_MAX_NAME]; */ +/* PIO_Offset dimlen; */ + +/* if ((ret = PIOc_inq_dim(ncid, d, my_dim_name, &dimlen))) */ +/* return ret; */ +/* if (dimlen != (d ? dim_len[d] : NUM_TIMESTEPS) || strcmp(my_dim_name, dim_name[d])) */ +/* return ERR_BAD; */ +/* } */ + +/* /\* Check the variable. *\/ */ +/* if ((ret = PIOc_inq_var(ncid, 0, var_name, &xtype, &ndims, dimids, &natts))) */ +/* return ret; */ +/* if (xtype != NC_INT || ndims != NDIM3 || dimids[0] != 0 || dimids[1] != 1 || */ +/* dimids[2] != 2 || natts != 0) */ +/* return ERR_BAD; */ + +/* /\* Allocate storage for sample data. *\/ */ +/* int buffer[elements_per_pe]; */ +/* int buffer_in[elements_per_pe]; */ + +/* /\* Check each timestep. *\/ */ +/* for (int t = 0; t < NUM_TIMESTEPS; t++) */ +/* { */ +/* int varid = 0; /\* There's only one var in sample file. *\/ */ + +/* /\* This is the data we expect for this timestep. *\/ */ +/* for (int i = 0; i < elements_per_pe; i++) */ +/* buffer[i] = 100 * t + START_DATA_VAL + my_rank; */ + +/* /\* Read one record. *\/ */ +/* if ((ret = PIOc_setframe(ncid, varid, t))) */ +/* ERR(ret); */ +/* if ((ret = PIOc_read_darray(ncid, varid, ioid, elements_per_pe, buffer_in))) */ +/* return ret; */ + +/* /\* Check the results. *\/ */ +/* for (int i = 0; i < elements_per_pe; i++) */ +/* if (buffer_in[i] != buffer[i]) */ +/* return ERR_BAD; */ +/* } */ + +/* /\* Close the file. *\/ */ +/* if ((ret = PIOc_closefile(ncid))) */ +/* return ret; */ + +/* /\* Everything looks good! *\/ */ +/* return 0; */ +/* } */ + +/* Write, then read, a simple example with darrays. + + The sample file created by this program is a small netCDF file. It + has the following contents (as shown by ncdump): + +
+netcdf darray_no_async_iotype_1 {
+dimensions:
+	unlimted = UNLIMITED ; // (2 currently)
+	x = 4 ;
+	y = 4 ;
+variables:
+	int foo(unlimted, x, y) ;
+data:
+
+ foo =
+  42, 42, 42, 42,
+  43, 43, 43, 43,
+  44, 44, 44, 44,
+  45, 45, 45, 45,
+  142, 142, 142, 142,
+  143, 143, 143, 143,
+  144, 144, 144, 144,
+  145, 145, 145, 145 ;
+}
+    
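As a rough illustration (not part of this patch): the listing above can only be produced after the asynchronous I/O service is set up. The sketch below follows the defines and the PIOc_init_async() call used in this example (1 IO task, one computation component of 4 tasks, 5 MPI ranks total); error handling and the actual file writes are omitted, so treat it as an outline of the setup rather than working example code.

#include <mpi.h>
#include <pio.h>

int main(int argc, char **argv)
{
    int my_rank, iosysid;
    int num_procs2[1] = {4};   /* One computation component with 4 tasks. */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    /* Rank 0 becomes the IO task; it services PIO calls made on the
     * computation tasks and shuts down when they call PIOc_finalize(). */
    PIOc_init_async(MPI_COMM_WORLD, 1, NULL, 1, num_procs2, NULL, NULL, NULL,
                    PIO_REARR_BOX, &iosysid);

    if (my_rank >= 1)   /* Computation tasks only, as in the example's main(). */
    {
        /* ... create the decomposition, the file, and the darray writes here ... */
        PIOc_finalize(iosysid);
    }

    MPI_Finalize();
    return 0;
}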
+ +*/ + int main(int argc, char* argv[]) + { + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int iosysid; /* The ID for the parallel I/O system. */ + /* int ncid; /\* The ncid of the netCDF file. *\/ */ + /* int dimid[NDIM3]; /\* The dimension ID. *\/ */ + /* int varid; /\* The ID of the netCDF varable. *\/ */ + /* char filename[NC_MAX_NAME + 1]; /\* Test filename. *\/ */ + /* int num_flavors = 0; /\* Number of iotypes available in this build. *\/ */ + /* int format[NUM_NETCDF_FLAVORS]; /\* Different output flavors. *\/ */ + int ret; /* Return value. */ + +#ifdef TIMING + /* Initialize the GPTL timing library. */ + if ((ret = GPTLinitialize ())) + return ret; +#endif + + /* Initialize MPI. */ + if ((ret = MPI_Init(&argc, &argv))) + MPIERR(ret); + if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN))) + MPIERR(ret); + + /* Learn my rank and the total number of processors. */ + if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank))) + MPIERR(ret); + if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks))) + MPIERR(ret); + + /* Check that a valid number of processors was specified. */ + printf("%d: ParallelIO Library darray_async example running on %d processors.\n", + my_rank, ntasks); + if (ntasks != TARGET_NTASKS) + { + fprintf(stderr, "Number of processors must be %d!\n", TARGET_NTASKS); + return ERR_BAD; + } + + /* Turn on logging. */ + if ((ret = PIOc_set_log_level(LOG_LEVEL))) + return ret; + + /* Num procs for computation. */ + int num_procs2[COMPONENT_COUNT] = {4}; + + /* Is the current process a computation task? */ + int comp_task = my_rank < NUM_IO_TASKS ? 0 : 1; + + /* Initialize the IO system. */ + if ((ret = PIOc_init_async(MPI_COMM_WORLD, NUM_IO_TASKS, NULL, COMPONENT_COUNT, + num_procs2, NULL, NULL, NULL, PIO_REARR_BOX, &iosysid))) + ERR(ret); + + + /* The rest of the code executes on computation tasks only. As + * PIO functions are called on the computation tasks, the + * async system will call them on the IO task. When the + * computation tasks call PIO_finalize(), the IO task will get + * a message to shut itself down. */ + if (comp_task) + { + /* PIO_Offset elements_per_pe; /\* Array elements per processing unit. *\/ */ + /* int ioid; /\* The I/O description ID. *\/ */ + + /* /\* How many elements on each computation task? *\/ */ + /* elements_per_pe = DIM_LEN_X * DIM_LEN_Y / NUM_COMP_TASKS; */ + + /* /\* Allocate and initialize array of decomposition mapping. *\/ */ + /* PIO_Offset compdof[elements_per_pe]; */ + /* for (int i = 0; i < elements_per_pe; i++) */ + /* compdof[i] = my_rank * elements_per_pe + i; */ + + /* /\* Create the PIO decomposition for this example. Since */ + /* this is a variable with an unlimited dimension, we want */ + /* to create a 2-D composition which represents one */ + /* record. *\/ */ + /* printf("rank: %d Creating decomposition...\n", my_rank); */ + /* if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3 - 1, &dim_len[1], elements_per_pe, */ + /* compdof, &ioid, 0, NULL, NULL))) */ + /* ERR(ret); */ + +/* /\* The number of favors may change with the build parameters. *\/ */ +/* #ifdef _PNETCDF */ +/* format[num_flavors++] = PIO_IOTYPE_PNETCDF; */ +/* #endif */ +/* format[num_flavors++] = PIO_IOTYPE_NETCDF; */ +/* #ifdef _NETCDF4 */ +/* format[num_flavors++] = PIO_IOTYPE_NETCDF4C; */ +/* format[num_flavors++] = PIO_IOTYPE_NETCDF4P; */ +/* #endif */ + +/* /\* Use PIO to create the example file in each of the four */ +/* * available ways. 
*\/ */ +/* for (int fmt = 0; fmt < num_flavors; fmt++) */ +/* { */ +/* /\* Create a filename. *\/ */ +/* sprintf(filename, "darray_no_async_iotype_%d.nc", format[fmt]); */ + +/* /\* Create the netCDF output file. *\/ */ +/* printf("rank: %d Creating sample file %s with format %d...\n", */ +/* my_rank, filename, format[fmt]); */ +/* if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename, PIO_CLOBBER))) */ +/* ERR(ret); */ + +/* /\* Define netCDF dimension and variable. *\/ */ +/* printf("rank: %d Defining netCDF metadata...\n", my_rank); */ +/* for (int d = 0; d < NDIM3; d++) */ +/* if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d]))) */ +/* ERR(ret); */ +/* if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimid, &varid))) */ +/* ERR(ret); */ +/* if ((ret = PIOc_enddef(ncid))) */ +/* ERR(ret); */ + +/* /\* Allocate storage for sample data. *\/ */ +/* int buffer[elements_per_pe]; */ + +/* /\* Write each timestep. *\/ */ +/* for (int t = 0; t < NUM_TIMESTEPS; t++) */ +/* { */ +/* /\* Create some data for this timestep. *\/ */ +/* for (int i = 0; i < elements_per_pe; i++) */ +/* buffer[i] = 100 * t + START_DATA_VAL + my_rank; */ + +/* /\* Write data to the file. *\/ */ +/* printf("rank: %d Writing sample data...\n", my_rank); */ +/* if ((ret = PIOc_setframe(ncid, varid, t))) */ +/* ERR(ret); */ +/* if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, buffer, NULL))) */ +/* ERR(ret); */ +/* } */ + +/* /\* THis will cause all data to be written to disk. *\/ */ +/* if ((ret = PIOc_sync(ncid))) */ +/* ERR(ret); */ + +/* /\* Close the netCDF file. *\/ */ +/* printf("rank: %d Closing the sample data file...\n", my_rank); */ +/* if ((ret = PIOc_closefile(ncid))) */ +/* ERR(ret); */ + +/* /\* Check the output file. *\/ */ +/* /\* if ((ret = check_file(iosysid, ntasks, filename, format[fmt], elements_per_pe, *\/ */ +/* /\* my_rank, ioid))) *\/ */ +/* /\* ERR(ret); *\/ */ +/* } */ + + /* Free the PIO decomposition. */ + /* printf("rank: %d Freeing PIO decomposition...\n", my_rank); */ + /* if ((ret = PIOc_freedecomp(iosysid, ioid))) */ + /* ERR(ret); */ + + /* Finalize the IO system. Only call this from the computation tasks. */ + printf("%d %s Freeing PIO resources\n", my_rank, TEST_NAME); + if ((ret = PIOc_finalize(iosysid))) + ERR(ret); + } /* endif comp_task */ + + /* Finalize the MPI library. */ + MPI_Finalize(); + +#ifdef TIMING + /* Finalize the GPTL timing library. */ + if ((ret = GPTLfinalize ())) + return ret; +#endif + + printf("rank: %d SUCCESS!\n", my_rank); + return 0; + } diff --git a/src/externals/pio2/examples/c/darray_no_async.c b/src/externals/pio2/examples/c/darray_no_async.c new file mode 100644 index 000000000000..14228ab422c6 --- /dev/null +++ b/src/externals/pio2/examples/c/darray_no_async.c @@ -0,0 +1,358 @@ +/* + * @file + * @brief A simple C example for the ParallelIO Library. + * + * This example creates a netCDF output file with three dimensions + * (one unlimited) and one variable. It first writes, then reads the + * sample file using distributed arrays. + * + * This example can be run in parallel for 4 processors. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef TIMING +#include +#endif + +/* The number of possible output netCDF output flavors available to + * the ParallelIO library. */ +#define NUM_NETCDF_FLAVORS 4 + +/* The number of dimensions in the example data. */ +#define NDIM3 3 + +/* The number of timesteps of data. 
*/ +#define NUM_TIMESTEPS 2 + +/* The length of our sample data in X dimension.*/ +#define DIM_LEN_X 4 + +/* The length of our sample data in Y dimension.*/ +#define DIM_LEN_Y 4 + +/* The name of the variable in the netCDF output file. */ +#define VAR_NAME "foo" + +/* Return code when netCDF output file does not match + * expectations. */ +#define ERR_BAD 1001 + +/* The meaning of life, the universe, and everything. */ +#define START_DATA_VAL 42 + +/* Number of tasks this example runs on. */ +#define TARGET_NTASKS 4 + +/* Logging level. */ +#define LOG_LEVEL 3 + +/* Lengths of dimensions. */ +int dim_len[NDIM3] = {NC_UNLIMITED, DIM_LEN_X, DIM_LEN_Y}; + +/* Names of dimensions. */ +char dim_name[NDIM3][PIO_MAX_NAME + 1] = {"unlimted", "x", "y"}; + +/* Handle MPI errors. This should only be used with MPI library + * function calls. */ +#define MPIERR(e) do { \ + MPI_Error_string(e, err_buffer, &resultlen); \ + printf("MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \ + MPI_Finalize(); \ + return 2; \ + } while (0) + +/* Handle non-MPI errors by finalizing the MPI library and exiting + * with an exit code. */ +#define ERR(e) do { \ + MPI_Finalize(); \ + return e; \ + } while (0) + +/* Global err buffer for MPI. When there is an MPI error, this buffer + * is used to store the error message that is associated with the MPI + * error. */ +char err_buffer[MPI_MAX_ERROR_STRING]; + +/* This is the length of the most recent MPI error message, stored + * int the global error string. */ +int resultlen; + +/* @brief Check the output file. + * + * Use netCDF to check that the output is as expected. + * + * @param ntasks The number of processors running the example. + * @param filename The name of the example file to check. + * + * @return 0 if example file is correct, non-zero otherwise. */ +int check_file(int iosysid, int ntasks, char *filename, int iotype, + int elements_per_pe, int my_rank, int ioid) +{ + + int ncid; /* File ID from netCDF. */ + int ndims; /* Number of dimensions. */ + int nvars; /* Number of variables. */ + int ngatts; /* Number of global attributes. */ + int unlimdimid; /* ID of unlimited dimension. */ + int natts; /* Number of variable attributes. */ + nc_type xtype; /* NetCDF data type of this variable. */ + int ret; /* Return code for function calls. */ + int dimids[NDIM3]; /* Dimension ids for this variable. */ + char var_name[NC_MAX_NAME]; /* Name of the variable. */ + /* size_t start[NDIM3]; /\* Zero-based index to start read. *\/ */ + /* size_t count[NDIM3]; /\* Number of elements to read. *\/ */ + /* int buffer[DIM_LEN_X]; /\* Buffer to read in data. *\/ */ + /* int expected[DIM_LEN_X]; /\* Data values we expect to find. *\/ */ + + /* Open the file. */ + if ((ret = PIOc_openfile_retry(iosysid, &ncid, &iotype, filename, 0, 0))) + return ret; + printf("opened file %s ncid = %d\n", filename, ncid); + + /* Check the metadata. */ + if ((ret = PIOc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid))) + return ret; + + /* Check the dimensions. */ + if (ndims != NDIM3 || nvars != 1 || ngatts != 0 || unlimdimid != 0) + return ERR_BAD; + for (int d = 0; d < NDIM3; d++) + { + char my_dim_name[NC_MAX_NAME]; + PIO_Offset dimlen; + + if ((ret = PIOc_inq_dim(ncid, d, my_dim_name, &dimlen))) + return ret; + if (dimlen != (d ? dim_len[d] : NUM_TIMESTEPS) || strcmp(my_dim_name, dim_name[d])) + return ERR_BAD; + } + + /* Check the variable. 
*/ + if ((ret = PIOc_inq_var(ncid, 0, var_name, &xtype, &ndims, dimids, &natts))) + return ret; + if (xtype != NC_INT || ndims != NDIM3 || dimids[0] != 0 || dimids[1] != 1 || + dimids[2] != 2 || natts != 0) + return ERR_BAD; + + /* Allocate storage for sample data. */ + int buffer[elements_per_pe]; + int buffer_in[elements_per_pe]; + + /* Check each timestep. */ + for (int t = 0; t < NUM_TIMESTEPS; t++) + { + int varid = 0; /* There's only one var in sample file. */ + + /* This is the data we expect for this timestep. */ + for (int i = 0; i < elements_per_pe; i++) + buffer[i] = 100 * t + START_DATA_VAL + my_rank; + + /* Read one record. */ + if ((ret = PIOc_setframe(ncid, varid, t))) + ERR(ret); + if ((ret = PIOc_read_darray(ncid, varid, ioid, elements_per_pe, buffer_in))) + return ret; + + /* Check the results. */ + for (int i = 0; i < elements_per_pe; i++) + if (buffer_in[i] != buffer[i]) + return ERR_BAD; + } + + /* Close the file. */ + if ((ret = PIOc_closefile(ncid))) + return ret; + + /* Everything looks good! */ + return 0; +} + +/* Write, then read, a simple example with darrays. + + The sample file created by this program is a small netCDF file. It + has the following contents (as shown by ncdump): + +
+netcdf darray_no_async_iotype_1 {
+dimensions:
+	unlimted = UNLIMITED ; // (2 currently)
+	x = 4 ;
+	y = 4 ;
+variables:
+	int foo(unlimted, x, y) ;
+data:
+
+ foo =
+  42, 42, 42, 42,
+  43, 43, 43, 43,
+  44, 44, 44, 44,
+  45, 45, 45, 45,
+  142, 142, 142, 142,
+  143, 143, 143, 143,
+  144, 144, 144, 144,
+  145, 145, 145, 145 ;
+}
+    
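The numbers in this listing follow directly from the formulas used further down in the example: compdof[i] = my_rank * elements_per_pe + i and buffer[i] = 100 * t + START_DATA_VAL + my_rank. As a sanity check (not part of this patch), the short standalone program below reconstructs the same two records without needing MPI or PIO.

/* Rebuild the expected contents of variable foo from the example's own
 * decomposition: task my_rank owns flattened elements my_rank*4 .. my_rank*4+3
 * of each 4x4 record and fills them with 100 * t + START_DATA_VAL + my_rank. */
#include <stdio.h>

#define NTASKS 4
#define DIM_LEN_X 4
#define DIM_LEN_Y 4
#define NUM_TIMESTEPS 2
#define START_DATA_VAL 42

int main(void)
{
    int elements_per_pe = DIM_LEN_X * DIM_LEN_Y / NTASKS;   /* 4 */
    int foo[NUM_TIMESTEPS][DIM_LEN_X * DIM_LEN_Y];

    for (int t = 0; t < NUM_TIMESTEPS; t++)
        for (int my_rank = 0; my_rank < NTASKS; my_rank++)
            for (int i = 0; i < elements_per_pe; i++)
                foo[t][my_rank * elements_per_pe + i] = 100 * t + START_DATA_VAL + my_rank;

    /* Prints 42..45 for record 0 and 142..145 for record 1, four copies of
     * each value per row, matching the listing above. */
    for (int t = 0; t < NUM_TIMESTEPS; t++)
        for (int j = 0; j < DIM_LEN_X * DIM_LEN_Y; j++)
            printf("%d%s", foo[t][j], (j + 1) % DIM_LEN_Y ? ", " : ",\n");
    return 0;
}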
+ +*/ + int main(int argc, char* argv[]) + { + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ + int ioproc_start = 0; /* Rank of first task to be used for I/O. */ + PIO_Offset elements_per_pe; /* Array elements per processing unit. */ + int iosysid; /* The ID for the parallel I/O system. */ + int ncid; /* The ncid of the netCDF file. */ + int dimid[NDIM3]; /* The dimension ID. */ + int varid; /* The ID of the netCDF varable. */ + int ioid; /* The I/O description ID. */ + char filename[NC_MAX_NAME + 1]; /* Test filename. */ + int num_flavors = 0; /* Number of iotypes available in this build. */ + int format[NUM_NETCDF_FLAVORS]; /* Different output flavors. */ + int ret; /* Return value. */ + +#ifdef TIMING + /* Initialize the GPTL timing library. */ + if ((ret = GPTLinitialize ())) + return ret; +#endif + + /* Initialize MPI. */ + if ((ret = MPI_Init(&argc, &argv))) + MPIERR(ret); + if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN))) + MPIERR(ret); + + /* Learn my rank and the total number of processors. */ + if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank))) + MPIERR(ret); + if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks))) + MPIERR(ret); + + /* Check that a valid number of processors was specified. */ + if (ntasks != TARGET_NTASKS) + fprintf(stderr, "Number of processors must be 4!\n"); + printf("%d: ParallelIO Library darray_no_async example running on %d processors.\n", + my_rank, ntasks); + + /* Turn on logging. */ + if ((ret = PIOc_set_log_level(LOG_LEVEL))) + return ret; + + /* Initialize the PIO IO system. This specifies how many and + * which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, 1, ioproc_stride, + ioproc_start, PIO_REARR_BOX, &iosysid))) + ERR(ret); + + /* Describe the decomposition. */ + elements_per_pe = DIM_LEN_X * DIM_LEN_Y / TARGET_NTASKS; + + /* Allocate and initialize array of decomposition mapping. */ + PIO_Offset compdof[elements_per_pe]; + for (int i = 0; i < elements_per_pe; i++) + compdof[i] = my_rank * elements_per_pe + i; + + /* Create the PIO decomposition for this example. Since this + * is a variable with an unlimited dimension, we want to + * create a 2-D composition which represents one record. */ + printf("rank: %d Creating decomposition...\n", my_rank); + if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3 - 1, &dim_len[1], elements_per_pe, + compdof, &ioid, 0, NULL, NULL))) + ERR(ret); + + /* The number of favors may change with the build parameters. */ +#ifdef _PNETCDF + format[num_flavors++] = PIO_IOTYPE_PNETCDF; +#endif + format[num_flavors++] = PIO_IOTYPE_NETCDF; +#ifdef _NETCDF4 + format[num_flavors++] = PIO_IOTYPE_NETCDF4C; + format[num_flavors++] = PIO_IOTYPE_NETCDF4P; +#endif + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + /* Create a filename. */ + sprintf(filename, "darray_no_async_iotype_%d.nc", format[fmt]); + + /* Create the netCDF output file. */ + printf("rank: %d Creating sample file %s with format %d...\n", + my_rank, filename, format[fmt]); + if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename, PIO_CLOBBER))) + ERR(ret); + + /* Define netCDF dimension and variable. 
*/ + printf("rank: %d Defining netCDF metadata...\n", my_rank); + for (int d = 0; d < NDIM3; d++) + if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d]))) + ERR(ret); + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimid, &varid))) + ERR(ret); + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Allocate storage for sample data. */ + int buffer[elements_per_pe]; + + /* Write each timestep. */ + for (int t = 0; t < NUM_TIMESTEPS; t++) + { + /* Create some data for this timestep. */ + for (int i = 0; i < elements_per_pe; i++) + buffer[i] = 100 * t + START_DATA_VAL + my_rank; + + /* Write data to the file. */ + printf("rank: %d Writing sample data...\n", my_rank); + if ((ret = PIOc_setframe(ncid, varid, t))) + ERR(ret); + if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, buffer, NULL))) + ERR(ret); + } + + /* THis will cause all data to be written to disk. */ + if ((ret = PIOc_sync(ncid))) + ERR(ret); + + /* Close the netCDF file. */ + printf("rank: %d Closing the sample data file...\n", my_rank); + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Check the output file. */ + /* if ((ret = check_file(iosysid, ntasks, filename, format[fmt], elements_per_pe, */ + /* my_rank, ioid))) */ + /* ERR(ret); */ + } + + /* Free the PIO decomposition. */ + printf("rank: %d Freeing PIO decomposition...\n", my_rank); + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + /* Finalize the IO system. */ + printf("rank: %d Freeing PIO resources...\n", my_rank); + if ((ret = PIOc_finalize(iosysid))) + ERR(ret); + + /* Finalize the MPI library. */ + MPI_Finalize(); + +#ifdef TIMING + /* Finalize the GPTL timing library. */ + if ((ret = GPTLfinalize ())) + return ret; +#endif + + printf("rank: %d SUCCESS!\n", my_rank); + return 0; + } diff --git a/src/externals/pio2/src/clib/CMakeLists.txt b/src/externals/pio2/src/clib/CMakeLists.txt index d2c1c3bbeb68..7fac8891a8e8 100644 --- a/src/externals/pio2/src/clib/CMakeLists.txt +++ b/src/externals/pio2/src/clib/CMakeLists.txt @@ -54,24 +54,19 @@ install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/pio.h DESTINATION include) #============================================================================== # DEFINE THE DEPENDENCIES #============================================================================== - -#===== MPI ===== -if (PIO_USE_MPISERIAL) - find_package (MPISERIAL COMPONENTS C REQUIRED) - if (MPISERIAL_C_FOUND) +# MPI test done at top level +if (MPISERIAL_C_FOUND) target_compile_definitions (pioc PRIVATE MPI_SERIAL) target_include_directories (pioc PUBLIC ${MPISERIAL_C_INCLUDE_DIRS}) target_link_libraries (pioc PUBLIC ${MPISERIAL_C_LIBRARIES}) - set (WITH_PNETCDF FALSE) - endif () -else () - find_package (MPI REQUIRED) endif () + + #===== GPTL ===== if (PIO_ENABLE_TIMING) find_package (GPTL COMPONENTS C QUIET) diff --git a/src/externals/pio2/src/clib/bget.c b/src/externals/pio2/src/clib/bget.c index 726ce5580da6..98bfc36893d2 100644 --- a/src/externals/pio2/src/clib/bget.c +++ b/src/externals/pio2/src/clib/bget.c @@ -400,7 +400,7 @@ #include #include #include -#ifdef PIO_USE_MALLOC +#if PIO_USE_MALLOC #include #endif @@ -614,7 +614,7 @@ bufsize requested_size; int compactseq = 0; #endif -#ifdef PIO_USE_MALLOC +#if PIO_USE_MALLOC // if(requested_size>maxsize){ // maxsize=requested_size; // printf("%s %d %d\n",__FILE__,__LINE__,maxsize); @@ -847,7 +847,7 @@ bufsize size; bufsize osize; /* Old size of buffer */ struct bhead *b; -#ifdef PIO_USE_MALLOC +#if PIO_USE_MALLOC return(realloc(buf, size)); #endif if ((nbuf = 
bget(size)) == NULL) { /* Acquire new buffer */ @@ -882,7 +882,7 @@ void *buf; { struct bfhead *b, *bn; -#ifdef PIO_USE_MALLOC +#if PIO_USE_MALLOC // printf("bget free %d %x\n",__LINE__,buf); free(buf); return; diff --git a/src/externals/pio2/src/clib/config.h.in b/src/externals/pio2/src/clib/config.h.in index 1722872c3056..8d80d37fd510 100644 --- a/src/externals/pio2/src/clib/config.h.in +++ b/src/externals/pio2/src/clib/config.h.in @@ -22,4 +22,7 @@ /** Set to non-zero to turn on logging. Output may be large. */ #define PIO_ENABLE_LOGGING @ENABLE_LOGGING@ +/** Size of MPI_Offset type. */ +#define SIZEOF_MPI_OFFSET @SIZEOF_MPI_OFFSET@ + #endif /* _PIO_CONFIG_ */ diff --git a/src/externals/pio2/src/clib/pio.h b/src/externals/pio2/src/clib/pio.h index fe234a155930..9dace3782c99 100644 --- a/src/externals/pio2/src/clib/pio.h +++ b/src/externals/pio2/src/clib/pio.h @@ -40,6 +40,9 @@ /** The maximum number of variables allowed in a netCDF file. */ #define PIO_MAX_VARS NC_MAX_VARS +/** The maximum number of dimensions allowed in a netCDF file. */ +#define PIO_MAX_DIMS NC_MAX_DIMS + /** Pass this to PIOc_set_iosystem_error_handling() as the iosysid in * order to set default error handling. */ #define PIO_DEFAULT (-1) @@ -115,7 +118,20 @@ typedef struct var_desc_t /** Number of requests bending with pnetcdf. */ int nreqs; - /** Buffer that contains the fill value for this variable. */ + /* Holds the fill value of this var. */ + void *fillvalue; + + /* The PIO data type (PIO_INT, PIO_FLOAT, etc.) */ + int pio_type; + + /* The size of the data type (2 for PIO_SHORT, 4 for PIO_INT, etc.) */ + PIO_Offset type_size; + + /** Non-zero if fill mode is turned on for this var. */ + int use_fill; + + /** Buffer that contains the holegrid fill values used to fill in + * missing sections of data when using the subset rearranger. */ void *fillbuf; /** Data buffer for this variable. */ @@ -191,10 +207,10 @@ enum PIO_REARR_COMM_FC_DIR typedef struct rearr_comm_fc_opt { /** Enable handshake */ - bool enable_hs; + bool hs; /** Enable isends - if false use blocking sends */ - bool enable_isend; + bool isend; /** Max pending requests * (PIO_REARR_COMM_UNLIMITED_PEND_REQ => unlimited pend req). @@ -217,10 +233,10 @@ typedef struct rearr_opt int fcd; /** flow control opts, comp to io procs */ - rearr_comm_fc_opt_t comm_fc_opts_comp2io; + rearr_comm_fc_opt_t comp2io; /** flow control opts, io to comp procs */ - rearr_comm_fc_opt_t comm_fc_opts_io2comp; + rearr_comm_fc_opt_t io2comp; } rearr_opt_t; /** @@ -245,10 +261,7 @@ typedef struct io_desc_t * io tasks. */ int nrecvs; - /** Local size of the decomposition array on the compute node. On - each compute task the application passes a compmap array of - length ndof. This array describes the arrangement of data in - memory on that task. */ + /** Local size of the decomposition array on the compute node. */ int ndof; /** All vars included in this io_desc_t have the same number of @@ -277,6 +290,9 @@ typedef struct io_desc_t /** The MPI type of the data. */ MPI_Datatype basetype; + /** The size in bytes of a datum of MPI type basetype. */ + int basetype_size; + /** Length of the iobuffer on this task for a single field on the * IO node. The arrays from compute nodes gathered and rearranged * to the io-nodes (which are sometimes collocated with compute @@ -287,24 +303,25 @@ typedef struct io_desc_t /** Maximum llen participating. */ int maxiobuflen; - /** Array of tasks received from in pio_swapm(). */ + /** Array (length nrecvs) of computation tasks received from. 
*/ int *rfrom; - /** Array of counts of data to be received from each task in - * pio_swapm(). */ + /** Array (length nrecvs) of counts of data to be received from + * each computation task by the IO tasks. */ int *rcount; - /** Array of data count to send to each task in the communication - * in pio_swapm(). */ + /** Array (length numiotasks) of data counts to send to each task + * in the communication in pio_swapm(). */ int *scount; - /** Send index. */ + /** Array (length ndof) for the BOX rearranger with the index + * for computation taks (send side during writes). */ PIO_Offset *sindex; - /** Receive index. */ + /** Index for the IO tasks (receive side during writes). */ PIO_Offset *rindex; - /** Array of receive MPI types in pio_swapm() call. */ + /** Array (of length nrecvs) of receive MPI types in pio_swapm() call. */ MPI_Datatype *rtype; /** Array of send MPI types in pio_swapm() call. */ @@ -387,6 +404,11 @@ typedef struct iosystem_desc_t /** The number of tasks in the computation communicator. */ int num_comptasks; + /** The number of tasks in the union communicator (will be + * num_comptasks for non-async, num_comptasks + num_iotasks for + * async). */ + int num_uniontasks; + /** Rank of this task in the union communicator. */ int union_rank; @@ -419,6 +441,10 @@ typedef struct iosystem_desc_t * communicator. */ int *ioranks; + /** An array of the ranks of all computation tasks within the + * union communicator. */ + int *compranks; + /** Controls handling errors. */ int error_handler; @@ -427,11 +453,15 @@ typedef struct iosystem_desc_t int default_rearranger; /** True if asynchronous interface is in use. */ - bool async_interface; + bool async; /** True if this task is a member of the IO communicator. */ bool ioproc; + /** True if this task is a member of a computation + * communicator. */ + bool compproc; + /** MPI Info object. */ MPI_Info info; @@ -455,8 +485,13 @@ typedef struct wmulti_buffer * PIOc_Init_Decomp(). */ int ioid; - /** Number of variables in this multi variable buffer. */ - int validvars; + /** Non-zero if this is a buffer for a record var. */ + int recordvar; + + /** Number of arrays of data in the multibuffer. Each array had + * data for one var or record. When multibuffer is flushed, all + * arrays are written and num_arrays returns to zero. */ + int num_arrays; /** Size of this variables data on local task. All vars in the * multi-buffer have the same size. */ @@ -673,10 +708,19 @@ enum PIO_ERROR_HANDLERS #define PIO_EBADCHUNK NC_EBADCHUNK #define PIO_ENOTBUILT NC_ENOTBUILT #define PIO_EDISKLESS NC_EDISKLESS -#define PIO_FILL_DOUBLE NC_FILL_DOUBLE -#define PIO_FILL_FLOAT NC_FILL_FLOAT -#define PIO_FILL_INT NC_FILL_INT + +/* These are the netCDF default fill values. */ +#define PIO_FILL_BYTE NC_FILL_BYTE #define PIO_FILL_CHAR NC_FILL_CHAR +#define PIO_FILL_SHORT NC_FILL_SHORT +#define PIO_FILL_INT NC_FILL_INT +#define PIO_FILL_FLOAT NC_FILL_FLOAT +#define PIO_FILL_DOUBLE NC_FILL_DOUBLE +#define PIO_FILL_UBYTE NC_FILL_UBYTE +#define PIO_FILL_USHORT NC_FILL_USHORT +#define PIO_FILL_UINT NC_FILL_UINT +#define PIO_FILL_INT64 NC_FILL_INT64 +#define PIO_FILL_UINT64 NC_FILL_UINT64 #endif /* defined( _PNETCDF) || defined(_NETCDF) */ /** Define the extra error codes for the parallel-netcdf library. */ @@ -703,18 +747,20 @@ extern "C" { /* Decomposition. */ /* Init decomposition with 1-based compmap array. 
*/ - int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int maplen, + int PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, int maplen, const PIO_Offset *compmap, int *ioidp, const int *rearr, const PIO_Offset *iostart, const PIO_Offset *iocount); - int PIOc_InitDecomp_bc(int iosysid, int basetype, int ndims, const int *dims, + int PIOc_InitDecomp_bc(int iosysid, int basetype, int ndims, const int *gdimlen, const long int *start, const long int *count, int *ioidp); /* Init decomposition with 0-based compmap array. */ - int PIOc_init_decomp(int iosysid, int basetype, int ndims, const int *dims, int maplen, - const PIO_Offset *compmap, int *ioidp, const int *rearranger, + int PIOc_init_decomp(int iosysid, int pio_type, int ndims, const int *gdimlen, int maplen, + const PIO_Offset *compmap, int *ioidp, int rearranger, const PIO_Offset *iostart, const PIO_Offset *iocount); - + + /* Free resources associated with a decomposition. */ int PIOc_freedecomp(int iosysid, int ioid); + int PIOc_readmap(const char *file, int *ndims, int **gdims, PIO_Offset *fmaplen, PIO_Offset **map, MPI_Comm comm); int PIOc_readmap_from_f90(const char *file,int *ndims, int **gdims, PIO_Offset *maplen, @@ -729,16 +775,17 @@ extern "C" { /* Write a decomposition file using netCDF. */ int PIOc_write_nc_decomp(int iosysid, const char *filename, int cmode, int ioid, - MPI_Comm comm, char *title, char *history, int fortran_order); + char *title, char *history, int fortran_order); /* Read a netCDF decomposition file. */ int PIOc_read_nc_decomp(int iosysid, const char *filename, int *ioid, MPI_Comm comm, int pio_type, char *title, char *history, int *fortran_order); - /* Initializing IO system. */ - int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, int component_count, + /* Initializing IO system for async. */ + int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, int component_count, int *num_procs_per_comp, int **proc_list, MPI_Comm *io_comm, MPI_Comm *comp_comm, - int *iosysidp); + int rearranger, int *iosysidp); + int PIOc_Init_Intercomm(int component_count, MPI_Comm peer_comm, MPI_Comm *comp_comms, MPI_Comm io_comm, int *iosysidp); int PIOc_get_numiotasks(int iosysid, int *numiotasks); diff --git a/src/externals/pio2/src/clib/pio_darray.c b/src/externals/pio2/src/clib/pio_darray.c index 4250d87ea38e..1d6f15c7d308 100644 --- a/src/externals/pio2/src/clib/pio_darray.c +++ b/src/externals/pio2/src/clib/pio_darray.c @@ -49,6 +49,27 @@ PIO_Offset PIOc_set_buffer_size_limit(PIO_Offset limit) * caller to use their own data buffering (instead of using the * buffering implemented in PIOc_write_darray()). * + * When the user calls PIOc_write_darray() one or more times, then + * PIO_write_darray_multi() will be called when the buffer is flushed. + * + * Internally, this function will: + *
+ *   • Find info about the file, decomposition, and variable.
+ *   • Do a special flush for pnetcdf if needed.
+ *   • Allocate a buffer big enough to hold all the data in the
+ *     multi-buffer, for all tasks.
+ *   • Call rearrange_comp2io() to move data from compute to IO
+ *     tasks.
+ *   • For parallel iotypes (pnetcdf and netCDF-4 parallel), call
+ *     pio_write_darray_multi_nc().
+ *   • For serial iotypes (netCDF classic and netCDF-4 serial), call
+ *     write_darray_multi_serial().
+ *   • For the subset rearranger, create a holegrid to write missing
+ *     data, then call pio_write_darray_multi_nc() or
+ *     write_darray_multi_serial() to write the holegrid.
+ *   • Do a special buffer flush for pnetcdf.
+ *     (An illustrative caller-side sketch follows this list.)
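As a hedged, caller-side sketch of the aggregated write described above (not part of this patch): the handles (ncid, foo_varid, bar_varid, ioid) and the packed data layout are hypothetical, both variables are assumed to be record variables sharing one decomposition, and fill-value handling is glossed over by passing NULL. It shows the call shape only, not a tested program.

#include <stdbool.h>
#include <pio.h>

int write_two_vars(int ncid, int foo_varid, int bar_varid, int ioid,
                   PIO_Offset arraylen, const int *foo_data, const int *bar_data,
                   int record)
{
    int varids[2] = {foo_varid, bar_varid};
    int frame[2] = {record, record};   /* Record index for each variable. */
    int data[2 * arraylen];            /* nvars local arrays, packed back to back. */

    for (PIO_Offset i = 0; i < arraylen; i++)
    {
        data[i] = foo_data[i];              /* Data for varids[0]. */
        data[arraylen + i] = bar_data[i];   /* Data for varids[1]. */
    }

    /* Write one record of both variables and ask for a flush to disk. */
    return PIOc_write_darray_multi(ncid, varids, ioid, 2, arraylen, data,
                                   frame, NULL, true);
}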
+ * * @param ncid identifies the netCDF file. * @param varids an array of length nvars containing the variable ids to * be written. @@ -65,25 +86,23 @@ PIO_Offset PIOc_set_buffer_size_limit(PIO_Offset limit) * that is on this processor. There are nvars arrays of data, and each * array of data contains one record worth of data for that variable. * @param frame an array of length nvars with the frame or record - * dimension for each of the nvars variables in IOBUF - * @param fillvalue pointer to the fill value to be used for missing - * data. Ignored if NULL. If provided, must be the correct fill value - * for the variable. The correct fill value will be used if NULL is - * passed. + * dimension for each of the nvars variables in IOBUF. NULL if this + * iodesc contains non-record vars. + * @param fillvalue pointer an array (of length nvars) of pointers to + * the fill value to be used for missing data. * @param flushtodisk non-zero to cause buffers to be flushed to disk. * @return 0 for success, error code otherwise. * @ingroup PIO_write_darray */ -int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PIO_Offset arraylen, - void *array, const int *frame, void **fillvalue, bool flushtodisk) +int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, + PIO_Offset arraylen, void *array, const int *frame, + void **fillvalue, bool flushtodisk) { iosystem_desc_t *ios; /* Pointer to io system information. */ file_desc_t *file; /* Pointer to file information. */ io_desc_t *iodesc; /* Pointer to IO description information. */ - int vsize; /* size in bytes of the given data. */ int rlen; /* total data buffer size. */ var_desc_t *vdesc0; /* pointer to var_desc structure for each var. */ - int mpierr; /* Return code from MPI functions. */ int ierr; /* Return code. */ /* Get the file info. */ @@ -98,61 +117,69 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PI if (varids[v] < 0 || varids[v] > PIO_MAX_VARS) return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); - LOG((1, "PIOc_write_darray_multi ncid = %d ioid = %d nvars = %d arraylen = %ld flushtodisk = %d", + LOG((1, "PIOc_write_darray_multi ncid = %d ioid = %d nvars = %d arraylen = %ld " + "flushtodisk = %d", ncid, ioid, nvars, arraylen, flushtodisk)); /* Check that we can write to this file. */ - if (! (file->mode & PIO_WRITE)) + if (!(file->mode & PIO_WRITE)) return pio_err(ios, file, PIO_EPERM, __FILE__, __LINE__); /* Get iodesc. */ if (!(iodesc = pio_get_iodesc_from_id(ioid))) return pio_err(ios, file, PIO_EBADID, __FILE__, __LINE__); + pioassert(iodesc->rearranger == PIO_REARR_BOX || iodesc->rearranger == PIO_REARR_SUBSET, + "unknown rearranger", __FILE__, __LINE__); + + /* Get a pointer to the variable info for the first variable. */ + vdesc0 = &file->varlist[varids[0]]; + + /* if the buffer is already in use in pnetcdf we need to flush first */ + if (file->iotype == PIO_IOTYPE_PNETCDF && vdesc0->iobuf) + flush_output_buffer(file, 1, 0); - /* For netcdf serial writes we collect the data on io nodes and + pioassert(!vdesc0->iobuf, "buffer overwrite",__FILE__, __LINE__); + + /* Determine total size of aggregated data (all vars/records). + * For netcdf serial writes we collect the data on io nodes and * then move that data one node at a time to the io master node * and write (or read). The buffer size on io task 0 must be as * large as the largest used to accommodate this serial io - * method. 
*/ - vdesc0 = file->varlist + varids[0]; - pioassert(!vdesc0->iobuf, "Attempt to overwrite existing io buffer",__FILE__, __LINE__); - - /* ??? */ - /* rlen = iodesc->llen*nvars; */ + * method. */ rlen = 0; if (iodesc->llen > 0) rlen = iodesc->maxiobuflen * nvars; - /* Currently there are two rearrangers box=1 and subset=2. There - * is never a case where rearranger==0. */ - LOG((2, "iodesc->rearranger = %d iodesc->needsfill = %d\n", iodesc->rearranger, - iodesc->needsfill)); - if (iodesc->rearranger > 0) + /* Allocate iobuf. */ + if (rlen > 0) { - if (rlen > 0) - { - if ((mpierr = MPI_Type_size(iodesc->basetype, &vsize))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((3, "vsize = %d", vsize)); - - /* Allocate memory for the variable buffer. */ - if (!(vdesc0->iobuf = bget((size_t)vsize * (size_t)rlen))) - piomemerror(ios, (size_t)rlen * (size_t)vsize, __FILE__, __LINE__); - LOG((3, "allocated %ld bytes for variable buffer", (size_t)rlen * (size_t)vsize)); - - /* If data are missing for the BOX rearranger, insert fill values. */ - if (iodesc->needsfill && iodesc->rearranger == PIO_REARR_BOX) - for (int nv = 0; nv < nvars; nv++) - for (int i = 0; i < iodesc->maxiobuflen; i++) - memcpy(&((char *)vdesc0->iobuf)[vsize * (i + nv * iodesc->maxiobuflen)], - &((char *)fillvalue)[nv * vsize], vsize); - } - - /* Move data from compute to IO tasks. */ - if ((ierr = rearrange_comp2io(ios, iodesc, array, vdesc0->iobuf, nvars))) - return pio_err(ios, file, ierr, __FILE__, __LINE__); + /* Allocate memory for the buffer for all vars/records. */ + if (!(vdesc0->iobuf = bget(iodesc->basetype_size * (size_t)rlen))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + LOG((3, "allocated %lld bytes for variable buffer", (size_t)rlen * iodesc->basetype_size)); + + /* If fill values are desired, and we're using the BOX + * rearranger, insert fill values. */ + if (iodesc->needsfill && iodesc->rearranger == PIO_REARR_BOX) + for (int nv = 0; nv < nvars; nv++) + for (int i = 0; i < iodesc->maxiobuflen; i++) + memcpy(&((char *)vdesc0->iobuf)[iodesc->basetype_size * (i + nv * iodesc->maxiobuflen)], + &((char *)fillvalue)[nv * iodesc->basetype_size], iodesc->basetype_size); } - + else if (file->iotype == PIO_IOTYPE_PNETCDF && ios->ioproc) + { + /* this assures that iobuf is allocated on all iotasks thus + assuring that the flush_output_buffer call above is called + collectively (from all iotasks) */ + if (!(vdesc0->iobuf = bget(1))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + LOG((3, "allocated token for variable buffer")); + } + + /* Move data from compute to IO tasks. */ + if ((ierr = rearrange_comp2io(ios, iodesc, array, vdesc0->iobuf, nvars))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + /* Write the darray based on the iotype. */ LOG((2, "about to write darray for iotype = %d", file->iotype)); switch (file->iotype) @@ -166,9 +193,7 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PI break; case PIO_IOTYPE_NETCDF4C: case PIO_IOTYPE_NETCDF: - if ((ierr = pio_write_darray_multi_nc_serial(file, nvars, varids, iodesc->ndims, iodesc->basetype, - iodesc->maxregions, iodesc->firstregion, iodesc->llen, - iodesc->num_aiotasks, vdesc0->iobuf, frame))) + if ((ierr = write_darray_multi_serial(file, nvars, varids, iodesc, 0, frame))) return pio_err(ios, file, ierr, __FILE__, __LINE__); break; @@ -182,6 +207,7 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PI /* Release resources. 
*/ if (vdesc0->iobuf) { + LOG((3,"freeing variable buffer in pio_darray")); brel(vdesc0->iobuf); vdesc0->iobuf = NULL; } @@ -202,22 +228,21 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PI LOG((2, "nvars = %d holegridsize = %ld iodesc->needsfill = %d\n", nvars, iodesc->holegridsize, iodesc->needsfill)); - if (vdesc0->fillbuf) - piodie("Attempt to overwrite existing buffer",__FILE__,__LINE__); + pioassert(!vdesc0->fillbuf, "buffer overwrite",__FILE__, __LINE__); /* Get a buffer. */ if (ios->io_rank == 0) - vdesc0->fillbuf = bget(iodesc->maxholegridsize * vsize * nvars); + vdesc0->fillbuf = bget(iodesc->maxholegridsize * iodesc->basetype_size * nvars); else if (iodesc->holegridsize > 0) - vdesc0->fillbuf = bget(iodesc->holegridsize * vsize * nvars); + vdesc0->fillbuf = bget(iodesc->holegridsize * iodesc->basetype_size * nvars); /* copying the fill value into the data buffer for the box * rearranger. This will be overwritten with data where * provided. */ for (int nv = 0; nv < nvars; nv++) for (int i = 0; i < iodesc->holegridsize; i++) - memcpy(&((char *)vdesc0->fillbuf)[vsize * (i + nv * iodesc->holegridsize)], - &((char *)fillvalue)[vsize * nv], vsize); + memcpy(&((char *)vdesc0->fillbuf)[iodesc->basetype_size * (i + nv * iodesc->holegridsize)], + &((char *)fillvalue)[iodesc->basetype_size * nv], iodesc->basetype_size); /* Write the darray based on the iotype. */ switch (file->iotype) @@ -232,10 +257,7 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PI break; case PIO_IOTYPE_NETCDF4C: case PIO_IOTYPE_NETCDF: - if ((ierr = pio_write_darray_multi_nc_serial(file, nvars, varids, iodesc->ndims, iodesc->basetype, - iodesc->maxfillregions, iodesc->fillregion, - iodesc->holegridsize, - iodesc->num_aiotasks, vdesc0->fillbuf, frame))) + if ((ierr = write_darray_multi_serial(file, nvars, varids, iodesc, 1, frame))) return pio_err(ios, file, ierr, __FILE__, __LINE__); break; default: @@ -254,7 +276,7 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PI } } - /* Flush data to disk. */ + /* Flush data to disk for pnetcdf. */ if (ios->ioproc && file->iotype == PIO_IOTYPE_PNETCDF) if ((ierr = flush_output_buffer(file, flushtodisk, 0))) return pio_err(ios, file, ierr, __FILE__, __LINE__); @@ -262,6 +284,50 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PI return PIO_NOERR; } +/** + * Find the fillvalue that should be used for a variable. + * + * @param file Info about file we are writing to. + * @param varid the variable ID. + * @param vdesc pointer to var_desc_t info for this var. + * @returns 0 for success, non-zero error code for failure. + * @ingroup PIO_write_darray + */ +int find_var_fillvalue(file_desc_t *file, int varid, var_desc_t *vdesc) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + int no_fill; + int ierr; + + /* Check inputs. */ + pioassert(file && file->iosystem && vdesc, "invalid input", __FILE__, __LINE__); + ios = file->iosystem; + + LOG((3, "find_var_fillvalue file->pio_ncid = %d varid = %d", file->pio_ncid, varid)); + + /* Find out PIO data type of var. */ + if ((ierr = PIOc_inq_vartype(file->pio_ncid, varid, &vdesc->pio_type))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + + /* Find out length of type. 
*/ + if ((ierr = PIOc_inq_type(file->pio_ncid, vdesc->pio_type, NULL, &vdesc->type_size))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + LOG((3, "getting fill value for varid = %d pio_type = %d type_size = %d", + varid, vdesc->pio_type, vdesc->type_size)); + + /* Allocate storage for the fill value. */ + if (!(vdesc->fillvalue = malloc(vdesc->type_size))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + + /* Get the fill value. */ + if ((ierr = PIOc_inq_var_fill(file->pio_ncid, varid, &no_fill, vdesc->fillvalue))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + vdesc->use_fill = no_fill ? 0 : 1; + LOG((3, "vdesc->use_fill = %d", vdesc->use_fill)); + + return PIO_NOERR; +} + /** * Write a distributed array to the output file. * @@ -269,20 +335,43 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PI * it to the IO nodes when the compute buffer is full or when a flush * is triggered. * + * Internally, this function will: + *
+ *   • Locate info about this file, decomposition, and variable.
+ *   • If we don't have a fill value for this variable, determine one
+ *     and remember it for future calls.
+ *   • Initialize or find the multi buffer for this record/var.
+ *   • Find out how much free space is available in the multi buffer
+ *     and flush if needed.
+ *   • Store the new user data in the multi buffer.
+ *   • If needed (only for the subset rearranger), fill in gaps in the
+ *     data with the fill value.
+ *   • Remember the frame value (i.e., record number) of this data if
+ *     there is one.
+ *     (An illustrative usage sketch follows this list.)
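A minimal sketch of the user-level call pattern these steps service, using only calls that appear elsewhere in this patch (PIOc_setframe(), PIOc_write_darray(), PIOc_sync()). The handles and local buffer are hypothetical and assumed to be set up as in the examples earlier in the patch; each PIOc_write_darray() call only caches data in the write multi buffer, and the data moves to the IO tasks when that buffer fills or is flushed. This is an illustration, not part of the patch.

#include <pio.h>

int write_records(int ncid, int varid, int ioid, PIO_Offset arraylen,
                  const int *local_data, int num_records)
{
    int ret;

    for (int t = 0; t < num_records; t++)
    {
        /* Select the record (frame) to write. */
        if ((ret = PIOc_setframe(ncid, varid, t)))
            return ret;

        /* Hand PIO this task's piece of the distributed array. A NULL
         * fillvalue means the variable's default fill value is used for
         * any holes in the decomposition. */
        if ((ret = PIOc_write_darray(ncid, varid, ioid, arraylen,
                                     (void *)&local_data[t * arraylen], NULL)))
            return ret;
    }

    /* Force buffered data out to the file. */
    return PIOc_sync(ncid);
}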
+ * + * NOTE: The write multi buffer wmulti_buffer is the cache on compute + * nodes that will collect and store multiple variables before sending + * them to the io nodes. Aggregating variables in this way leads to a + * considerable savings in communication expense. Variables in the wmb + * array must have the same decomposition and base data size and we + * also need to keep track of whether each is a recordvar (has an + * unlimited dimension) or not. + * * @param ncid the ncid of the open netCDF file. * @param varid the ID of the variable that these data will be written * to. * @param ioid the I/O description ID as passed back by * PIOc_InitDecomp(). - * @param arraylen the length of the array to be written. This should + * @param arraylen the length of the array to be written. This should * be at least the length of the local component of the distrubited * array. (Any values beyond length of the local component will be * ignored.) * @param array pointer to an array of length arraylen with the data * to be written. This is a pointer to the distributed portion of the * array that is on this task. - * @param fillvalue pointer to the fill value to be used for - * missing data. + * @param fillvalue pointer to the fill value to be used for missing + * data. * @returns 0 for success, non-zero error code for failure. * @ingroup PIO_write_darray */ @@ -296,8 +385,7 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * void *bufptr; /* A data buffer. */ MPI_Datatype vtype; /* The MPI type of the variable. */ wmulti_buffer *wmb; /* The write multi buffer for one or more vars. */ - int tsize; /* Size of MPI type. */ - bool recordvar; /* True if this is a record variable. */ + int recordvar; /* Non-zero if this is a record variable. */ int needsflush = 0; /* True if we need to flush buffer. */ bufsize totfree; /* Amount of free space in the buffer. */ bufsize maxfree; /* Max amount of free space in buffer. */ @@ -320,95 +408,59 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * if (!(iodesc = pio_get_iodesc_from_id(ioid))) return pio_err(ios, file, PIO_EBADID, __FILE__, __LINE__); - /* Get var description. */ - vdesc = file->varlist + varid; - LOG((2, "vdesc record %d ndims %d nreqs %d", vdesc->record, vdesc->ndims, vdesc->nreqs)); - - /* Is this a record variable? */ - recordvar = vdesc->record >= 0 ? true : false; - LOG((3, "recordvar = %d", recordvar)); - /* Check that the local size of the variable passed in matches the - * size expected by the io descriptor. */ + * size expected by the io descriptor. Fail if arraylen is too + * small, just put a warning in the log if it is too big (the + * excess values will be ignored.) */ if (arraylen < iodesc->ndof) return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); + LOG((2, "%s arraylen = %d iodesc->ndof = %d", + (iodesc->ndof != arraylen) ? "WARNING: iodesc->ndof != arraylen" : "", + arraylen, iodesc->ndof)); - if (iodesc->ndof != arraylen) - LOG((1, "User supplied array is larger than expected, arraylen != iodesc->ndof")); - - /* Get a pointer to the buffer space for this file. It will hold - * data from one or more variables that fit the same - * description. */ - wmb = &file->buffer; + /* Get var description. */ + vdesc = &(file->varlist[varid]); + LOG((2, "vdesc record %d ndims %d nreqs %d", vdesc->record, vdesc->ndims, + vdesc->nreqs)); + + /* If we don't know the fill value for this var, get it. 
*/ + if (!vdesc->fillvalue) + if ((ierr = find_var_fillvalue(file, varid, vdesc))) + return pio_err(ios, file, PIO_EBADID, __FILE__, __LINE__); + + /* Is this a record variable? The user must set the vdesc->record + * value by calling PIOc_setframe() before calling this + * function. */ + recordvar = vdesc->record >= 0 ? 1 : 0; + LOG((3, "recordvar = %d", recordvar)); - /* If the ioid is not initialized, set it. For non record vars, - * use the negative ??? */ - if (wmb->ioid == -1) - wmb->ioid = recordvar ? ioid : -ioid; - else - { - /* Handle record and non-record variables differently. */ - if (recordvar) - { - /* Moving to the end of the wmb linked list to add the - * current variable. ??? */ - while(wmb->next && wmb->ioid != ioid) - if (wmb->next) - wmb = wmb->next; -#ifdef _PNETCDF - /* Do we still need the commented code below? ??? */ - /* flush the previous record before starting a new one. this is collective */ - /* if (vdesc->request != NULL && (vdesc->request[0] != NC_REQ_NULL) || - (wmb->frame != NULL && vdesc->record != wmb->frame[0])){ - needsflush = 2; // flush to disk - } */ -#endif - } - else - { - /* Move to end of list. */ - while(wmb->next && wmb->ioid != -(ioid)) - if (wmb->next) - wmb = wmb->next; - } - } + /* Move to end of list or the entry that matches this ioid. */ + for (wmb = &file->buffer; wmb->next; wmb = wmb->next) + if (wmb->ioid == ioid && wmb->recordvar == recordvar) + break; - /* The write multi buffer wmulti_buffer is the cache on compute - nodes that will collect and store multiple variables before - sending them to the io nodes. Aggregating variables in this way - leads to a considerable savings in communication - expense. Variables in the wmb array must have the same - decomposition and base data size and we also need to keep track - of whether each is a recordvar (has an unlimited dimension) or - not. */ - if ((recordvar && wmb->ioid != ioid) || (!recordvar && wmb->ioid != -(ioid))) + /* If we did not find an existing wmb entry, create a new wmb. */ + if (wmb->ioid != ioid || wmb->recordvar != recordvar) { /* Allocate a buffer. */ if (!(wmb->next = bget((bufsize)sizeof(wmulti_buffer)))) - piomemerror(ios, sizeof(wmulti_buffer), __FILE__, __LINE__); + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); LOG((3, "allocated multi-buffer")); /* Set pointer to newly allocated buffer and initialize.*/ wmb = wmb->next; + wmb->recordvar = recordvar; wmb->next = NULL; - wmb->ioid = recordvar ? ioid : -ioid; - wmb->validvars = 0; + wmb->ioid = ioid; + wmb->num_arrays = 0; wmb->arraylen = arraylen; wmb->vid = NULL; wmb->data = NULL; wmb->frame = NULL; wmb->fillvalue = NULL; } - - /* Get the size of the MPI type. */ - if ((mpierr = MPI_Type_size(iodesc->basetype, &tsize))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - - LOG((2, "wmb->validvars = %d arraylen = %d tsize = %d\n", wmb->validvars, - arraylen, tsize)); - - /* At this point wmb should be pointing to a new or existing buffer - so we can add the data. */ + LOG((2, "wmb->num_arrays = %d arraylen = %d iodesc->basetype_size = %d\n", + wmb->num_arrays, arraylen, iodesc->basetype_size)); /* Find out how much free, contiguous space is available. */ bfreespace(&totfree, &maxfree); @@ -416,23 +468,24 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * /* maxfree is the available memory. If that is < 10% greater than * the size of the current request needsflush is true. 
*/ if (needsflush == 0) - needsflush = (maxfree <= 1.1 * (1 + wmb->validvars) * arraylen * tsize); + needsflush = (maxfree <= 1.1 * (1 + wmb->num_arrays) * arraylen * iodesc->basetype_size); /* Tell all tasks on the computation communicator whether we need * to flush data. */ - if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &needsflush, 1, MPI_INT, MPI_MAX, ios->comp_comm))) + if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &needsflush, 1, MPI_INT, MPI_MAX, + ios->comp_comm))) return check_mpi(file, mpierr, __FILE__, __LINE__); LOG((2, "needsflush = %d", needsflush)); /* Flush data if needed. */ if (needsflush > 0) { - LOG((2, "maxfree = %ld wmb->validvars = %d (1 + wmb->validvars) * arraylen * tsize = %ld totfree = %ld\n", - maxfree, wmb->validvars, (1 + wmb->validvars) * arraylen * tsize, totfree)); - #ifdef PIO_ENABLE_LOGGING /* Collect a debug report about buffer. */ cn_buffer_report(ios, true); + LOG((2, "maxfree = %ld wmb->num_arrays = %d (1 + wmb->num_arrays) *" + " arraylen * iodesc->basetype_size = %ld totfree = %ld\n", maxfree, wmb->num_arrays, + (1 + wmb->num_arrays) * arraylen * iodesc->basetype_size, totfree)); #endif /* PIO_ENABLE_LOGGING */ /* If needsflush == 2 flush to disk otherwise just flush to io node. */ @@ -443,91 +496,119 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * /* Get memory for data. */ if (arraylen > 0) { - if (!(wmb->data = bgetr(wmb->data, (1 + wmb->validvars) * arraylen * tsize))) - piomemerror(ios, (1 + wmb->validvars) * arraylen * tsize, __FILE__, __LINE__); - LOG((2, "got %ld bytes for data", (1 + wmb->validvars) * arraylen * tsize)); + if (!(wmb->data = bgetr(wmb->data, (1 + wmb->num_arrays) * arraylen * iodesc->basetype_size))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + LOG((2, "got %ld bytes for data", (1 + wmb->num_arrays) * arraylen * iodesc->basetype_size)); } /* vid is an array of variable ids in the wmb list, grow the list * and add the new entry. */ - if (!(wmb->vid = bgetr(wmb->vid, sizeof(int) * (1 + wmb->validvars)))) - piomemerror(ios, (1 + wmb->validvars) * sizeof(int), __FILE__, __LINE__); + if (!(wmb->vid = bgetr(wmb->vid, sizeof(int) * (1 + wmb->num_arrays)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); /* wmb->frame is the record number, we assume that the variables * in the wmb list may not all have the same unlimited dimension * value although they usually do. */ if (vdesc->record >= 0) - if (!(wmb->frame = bgetr(wmb->frame, sizeof(int) * (1 + wmb->validvars)))) - piomemerror(ios, (1 + wmb->validvars) * sizeof(int), __FILE__, __LINE__); + if (!(wmb->frame = bgetr(wmb->frame, sizeof(int) * (1 + wmb->num_arrays)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - /* If we need a fill value, get it. */ + /* If we need a fill value, get it. If we are using the subset + * rearranger and not using the netcdf fill mode then we need to + * do an extra write to fill in the holes with the fill value. */ if (iodesc->needsfill) { /* Get memory to hold fill value. */ - if (!(wmb->fillvalue = bgetr(wmb->fillvalue, tsize * (1 + wmb->validvars)))) - piomemerror(ios, (1 + wmb->validvars) * tsize, __FILE__, __LINE__); + if (!(wmb->fillvalue = bgetr(wmb->fillvalue, iodesc->basetype_size * (1 + wmb->num_arrays)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); /* If the user passed a fill value, use that, otherwise use * the default fill value of the netCDF type. Copy the fill * value to the buffer. 
*/ if (fillvalue) { - memcpy((char *)wmb->fillvalue + tsize * wmb->validvars, fillvalue, tsize); - LOG((3, "copied user-provided fill value tsize = %d", tsize)); + memcpy((char *)wmb->fillvalue + iodesc->basetype_size * wmb->num_arrays, + fillvalue, iodesc->basetype_size); + LOG((3, "copied user-provided fill value iodesc->basetype_size = %d", + iodesc->basetype_size)); } else { + void *fill; + signed char byte_fill = PIO_FILL_BYTE; + char char_fill = PIO_FILL_CHAR; + short short_fill = PIO_FILL_SHORT; + int int_fill = PIO_FILL_INT; + float float_fill = PIO_FILL_FLOAT; + double double_fill = PIO_FILL_DOUBLE; +#ifdef _NETCDF4 + unsigned char ubyte_fill = PIO_FILL_UBYTE; + unsigned short ushort_fill = PIO_FILL_USHORT; + unsigned int uint_fill = PIO_FILL_UINT; + long long int64_fill = PIO_FILL_INT64; + long long uint64_fill = PIO_FILL_UINT64; +#endif /* _NETCDF4 */ vtype = (MPI_Datatype)iodesc->basetype; LOG((3, "caller did not provide fill value vtype = %d", vtype)); - if (vtype == MPI_INT) - { - int fill = PIO_FILL_INT; - memcpy((char *)wmb->fillvalue + tsize * wmb->validvars, &fill, tsize); - } + + /* This must be done with an if statement, not a case, or + * openmpi will not build. */ + if (vtype == MPI_BYTE) + fill = &byte_fill; + else if (vtype == MPI_CHAR) + fill = &char_fill; + else if (vtype == MPI_SHORT) + fill = &short_fill; + else if (vtype == MPI_INT) + fill = &int_fill; else if (vtype == MPI_FLOAT) - { - float fill = PIO_FILL_FLOAT; - memcpy((char *)wmb->fillvalue + tsize * wmb->validvars, &fill, tsize); - } + fill = &float_fill; else if (vtype == MPI_DOUBLE) - { - double fill = PIO_FILL_DOUBLE; - memcpy((char *)wmb->fillvalue + tsize * wmb->validvars, &fill, tsize); - } - else if (vtype == MPI_CHARACTER) - { - char fill = PIO_FILL_CHAR; - memcpy((char *)wmb->fillvalue + tsize * wmb->validvars, &fill, tsize); - } + fill = &double_fill; +#ifdef _NETCDF4 + else if (vtype == MPI_UNSIGNED_CHAR) + fill = &ubyte_fill; + else if (vtype == MPI_UNSIGNED_SHORT) + fill = &ushort_fill; + else if (vtype == MPI_UNSIGNED) + fill = &uint_fill; + else if (vtype == MPI_LONG_LONG) + fill = &int64_fill; + else if (vtype == MPI_UNSIGNED_LONG_LONG) + fill = &uint64_fill; +#endif /* _NETCDF4 */ else return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + + memcpy((char *)wmb->fillvalue + iodesc->basetype_size * wmb->num_arrays, + fill, iodesc->basetype_size); + LOG((3, "copied fill value")); } } /* Tell the buffer about the data it is getting. */ wmb->arraylen = arraylen; - wmb->vid[wmb->validvars] = varid; + wmb->vid[wmb->num_arrays] = varid; + LOG((3, "wmb->num_arrays = %d wmb->vid[wmb->num_arrays] = %d", wmb->num_arrays, + wmb->vid[wmb->num_arrays])); /* Copy the user-provided data to the buffer. */ - bufptr = (void *)((char *)wmb->data + arraylen * tsize * wmb->validvars); + bufptr = (void *)((char *)wmb->data + arraylen * iodesc->basetype_size * wmb->num_arrays); if (arraylen > 0) { - memcpy(bufptr, array, arraylen * tsize); - LOG((3, "copied %ld bytes of user data", arraylen * tsize)); + memcpy(bufptr, array, arraylen * iodesc->basetype_size); + LOG((3, "copied %ld bytes of user data", arraylen * iodesc->basetype_size)); } /* Add the unlimited dimension value of this variable to the frame * array in wmb. 
*/ if (wmb->frame) - wmb->frame[wmb->validvars] = vdesc->record; - wmb->validvars++; + wmb->frame[wmb->num_arrays] = vdesc->record; + wmb->num_arrays++; - LOG((2, "wmb->validvars = %d iodesc->maxbytes / tsize = %d iodesc->ndof = %d iodesc->llen = %d", - wmb->validvars, iodesc->maxbytes / tsize, iodesc->ndof, iodesc->llen)); - - /* Call the sync when ??? */ - if (wmb->validvars >= iodesc->maxbytes / tsize) - PIOc_sync(ncid); + LOG((2, "wmb->num_arrays = %d iodesc->maxbytes / iodesc->basetype_size = %d " + "iodesc->ndof = %d iodesc->llen = %d", wmb->num_arrays, + iodesc->maxbytes / iodesc->basetype_size, iodesc->ndof, iodesc->llen)); return PIO_NOERR; } @@ -556,8 +637,6 @@ int PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, io_desc_t *iodesc; /* Pointer to IO description information. */ void *iobuf = NULL; /* holds the data as read on the io node. */ size_t rlen = 0; /* the length of data in iobuf. */ - int tsize; /* Total size. */ - int mpierr; /* Return code from MPI functions. */ int ierr; /* Return code. */ /* Get the file info. */ @@ -568,6 +647,8 @@ int PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, /* Get the iodesc. */ if (!(iodesc = pio_get_iodesc_from_id(ioid))) return pio_err(ios, file, PIO_EBADID, __FILE__, __LINE__); + pioassert(iodesc->rearranger == PIO_REARR_BOX || iodesc->rearranger == PIO_REARR_SUBSET, + "unknown rearranger", __FILE__, __LINE__); /* ??? */ if (ios->iomaster == MPI_ROOT) @@ -575,24 +656,10 @@ int PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, else rlen = iodesc->llen; - /* Is a rearranger in use? */ - if (iodesc->rearranger > 0) - { - if (ios->ioproc && rlen > 0) - { - /* Get the MPI type size. */ - if ((mpierr = MPI_Type_size(iodesc->basetype, &tsize))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - - /* Allocate a buffer for one record. */ - if (!(iobuf = bget((size_t)tsize * rlen))) - piomemerror(ios, rlen * (size_t)tsize, __FILE__, __LINE__); - } - } - else - { - iobuf = array; - } + /* Allocate a buffer for one record. */ + if (ios->ioproc && rlen > 0) + if (!(iobuf = bget(iodesc->basetype_size * rlen))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); /* Call the correct darray read function based on iotype. */ switch (file->iotype) @@ -611,16 +678,13 @@ int PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, return pio_err(NULL, NULL, PIO_EBADIOTYPE, __FILE__, __LINE__); } - /* If a rearranger was specified, rearrange the data. */ - if (iodesc->rearranger > 0) - { - if ((ierr = rearrange_io2comp(ios, iodesc, iobuf, array))) - return pio_err(ios, file, ierr, __FILE__, __LINE__); + /* Rearrange the data. */ + if ((ierr = rearrange_io2comp(ios, iodesc, iobuf, array))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); - /* Free the buffer. */ - if (rlen > 0) - brel(iobuf); - } + /* Free the buffer. */ + if (rlen > 0) + brel(iobuf); return PIO_NOERR; } diff --git a/src/externals/pio2/src/clib/pio_darray_int.c b/src/externals/pio2/src/clib/pio_darray_int.c index ac19b37e198e..79572219b91d 100644 --- a/src/externals/pio2/src/clib/pio_darray_int.c +++ b/src/externals/pio2/src/clib/pio_darray_int.c @@ -25,6 +25,16 @@ extern void *CN_bpool; /* Maximum buffer usage. */ extern PIO_Offset maxusage; +/* handler for freeing the memory buffer pool */ +void bpool_free(void *p) +{ + free(p); + if(p == CN_bpool){ + CN_bpool = NULL; + } +} + + /** * Initialize the compute buffer to size pio_cnbuffer_limit. 
* @@ -49,10 +59,10 @@ int compute_buffer_init(iosystem_desc_t *ios) if (!CN_bpool) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - bectl(NULL, malloc, free, pio_cnbuffer_limit); + bectl(NULL, malloc, bpool_free, pio_cnbuffer_limit); } #endif - LOG((2, "compute_buffer_init CN_bpool = %d", CN_bpool)); + LOG((2, "compute_buffer_init complete")); return PIO_NOERR; } @@ -60,7 +70,7 @@ int compute_buffer_init(iosystem_desc_t *ios) /** * Write a set of one or more aggregated arrays to output file. This * function is only used with parallel-netcdf and netcdf-4 parallel - * iotypes. Serial io types use pio_write_darray_multi_nc_serial(). + * iotypes. Serial io types use write_darray_multi_serial(). * * @param file a pointer to the open file descriptor for the file * that will be written to @@ -85,7 +95,7 @@ int compute_buffer_init(iosystem_desc_t *ios) * less than blocksize*numiotasks then some iotasks will have a NULL * iobuf. * @param frame the frame or record dimension for each of the nvars - * variables in iobuf. + * variables in iobuf. NULL if this iodesc contains non-record vars. * @return 0 for success, error code otherwise. * @ingroup PIO_write_darray */ @@ -122,7 +132,7 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int vdesc = file->varlist + vid[0]; /* If async is in use, send message to IO master task. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -350,6 +360,298 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int return ierr; } +/** + * Fill the tmp_start and tmp_count arrays, which contain the start + * and count arrays for all regions. + * + * This is an internal function which is only called on io tasks. It + * is called by write_darray_multi_serial(). + * + * @param region pointer to the first in a linked list of regions. + * @param maxregions the number of regions in the list. + * @param fndims the number of dimensions in the file. + * @param iodesc_ndims the number of dimensions in the decomposition. + * @param vdesc pointer to an array of var_desc_t for the vars being + * written. + * @param tmp_start pointer to an already allocaed array of length + * fndims * maxregions. This array will get the start values for all + * regions. + * @param tmp_count pointer to an already allocaed array of length + * fndims * maxregions. This array will get the count values for all + * regions. + * @returns 0 for success, error code otherwise. + * @ingroup PIO_read_darray + **/ +int find_all_start_count(io_region *region, int maxregions, int fndims, + int iodesc_ndims, var_desc_t *vdesc, size_t *tmp_start, + size_t *tmp_count) +{ + /* Check inputs. */ + pioassert(maxregions >= 0 && fndims > 0 && iodesc_ndims >= 0 && vdesc && + tmp_start && tmp_count, "invalid input", __FILE__, __LINE__); + + /* Find the start/count arrays for each region in the list. */ + for (int r = 0; r < maxregions; r++) + { + /* Initialize the start/count arrays for this region to 0. */ + for (int d = 0; d < fndims; d++) + { + tmp_start[d + r * fndims] = 0; + tmp_count[d + r * fndims] = 0; + } + + if (region) + { + if (vdesc->record >= 0) + { + /* This is a record based multidimensional + * array. Copy start/count for non-record + * dimensions. 
*/ + for (int i = fndims - iodesc_ndims; i < fndims; i++) + { + tmp_start[i + r * fndims] = region->start[i - (fndims - iodesc_ndims)]; + tmp_count[i + r * fndims] = region->count[i - (fndims - iodesc_ndims)]; + LOG((3, "tmp_start[%d] = %d tmp_count[%d] = %d", i + r * fndims, + tmp_start[i + r * fndims], i + r * fndims, + tmp_count[i + r * fndims])); + } + } + else + { + /* This is not a record based multidimensional array. */ + for (int i = 0; i < iodesc_ndims; i++) + { + tmp_start[i + r * fndims] = region->start[i]; + tmp_count[i + r * fndims] = region->count[i]; + LOG((3, "tmp_start[%d] = %d tmp_count[%d] = %d", i + r * fndims, + tmp_start[i + r * fndims], i + r * fndims, + tmp_count[i + r * fndims])); + } + } + + /* Move to next region. */ + region = region->next; + + } /* endif region */ + } /* next r */ + + return PIO_NOERR; +} + +/** + * Internal function called by IO tasks other than IO task 0 to send + * their tmp_start/tmp_count arrays to IO task 0. + * + * This is an internal function which is only called on io tasks other + * than IO task 0. It is called by write_darray_multi_serial(). + * + * @return 0 for success, error code otherwise. + * @ingroup PIO_write_darray + **/ +int send_all_start_count(iosystem_desc_t *ios, io_desc_t *iodesc, PIO_Offset llen, + int maxregions, int nvars, int fndims, size_t *tmp_start, + size_t *tmp_count, void *iobuf) +{ + MPI_Status status; /* Recv status for MPI. */ + int mpierr; /* Return code from MPI function codes. */ + int ierr; /* Return code. */ + + /* Check inputs. */ + pioassert(ios && ios->ioproc && ios->io_rank > 0 && maxregions >= 0, + "invalid inputs", __FILE__, __LINE__); + + /* Do a handshake. */ + if ((mpierr = MPI_Recv(&ierr, 1, MPI_INT, 0, 0, ios->io_comm, &status))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + + /* Send local length of iobuffer for each field (all + * fields are the same length). */ + if ((mpierr = MPI_Send((void *)&llen, 1, MPI_OFFSET, 0, ios->io_rank, ios->io_comm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((3, "sent llen = %d", llen)); + + /* Send the number of data regions, the start/count for + * all regions, and the data buffer with all the data. */ + if (llen > 0) + { + if ((mpierr = MPI_Send((void *)&maxregions, 1, MPI_INT, 0, ios->io_rank + ios->num_iotasks, + ios->io_comm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Send(tmp_start, maxregions * fndims, MPI_OFFSET, 0, + ios->io_rank + 2 * ios->num_iotasks, ios->io_comm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Send(tmp_count, maxregions * fndims, MPI_OFFSET, 0, + ios->io_rank + 3 * ios->num_iotasks, ios->io_comm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Send(iobuf, nvars * llen, iodesc->basetype, 0, + ios->io_rank + 4 * ios->num_iotasks, ios->io_comm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((3, "sent data for maxregions = %d", maxregions)); + } + + return PIO_NOERR; +} + +/** + * This is an internal function that is run only on IO proc 0. It + * receives data from all the other IO tasks, and write that data to + * disk. This is called from write_darray_multi_serial(). + * + * @param file a pointer to the open file descriptor for the file + * that will be written to. + * @param vid an array of the variable ids to be written + * @param frame the record dimension for each of the nvars variables + * in iobuf. NULL if this iodesc contains non-record vars. 
+ * @param iodesc pointer to the decomposition info. + * @param llen length of the iobuffer on this task for a single + * field. + * @param maxregions max number of blocks to be written from this + * iotask. + * @param nvars the number of variables to be written with this + * decomposition. + * @param fndims the number of dimensions in the file. + * @param tmp_start pointer to an already allocaed array of length + * fndims * maxregions. This array will get the start values for all + * regions. + * @param tmp_count pointer to an already allocaed array of length + * fndims * maxregions. This array will get the count values for all + * regions. + * @param iobuf the buffer to be written from this mpi task. May be + * null. for example we have 8 ionodes and a distributed array with + * global size 4, then at least 4 nodes will have a null iobuf. In + * practice the box rearranger trys to have at least blocksize bytes + * on each io task and so if the total number of bytes to write is + * less than blocksize*numiotasks then some iotasks will have a NULL + * iobuf. + * @return 0 for success, error code otherwise. + * @ingroup PIO_write_darray + */ +int recv_and_write_data(file_desc_t *file, const int *vid, const int *frame, + io_desc_t *iodesc, PIO_Offset llen, int maxregions, int nvars, + int fndims, size_t *tmp_start, size_t *tmp_count, void *iobuf) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + size_t rlen; /* Length of IO buffer on this task. */ + int rregions; /* Number of regions in buffer for this task. */ + size_t start[fndims], count[fndims]; + size_t loffset; + void *bufptr; + var_desc_t *vdesc; /* Contains info about the variable. */ + MPI_Status status; /* Recv status for MPI. */ + int mpierr; /* Return code from MPI function codes. */ + int ierr; /* Return code. */ + + ios = file->iosystem; + + /* For each of the other tasks that are using this task + * for IO. */ + for (int rtask = 0; rtask < ios->num_iotasks; rtask++) + { + /* From the remote tasks, we send information about + * the data regions. and also the data. */ + if (rtask) + { + /* handshake - tell the sending task I'm ready */ + if ((mpierr = MPI_Send(&ierr, 1, MPI_INT, rtask, 0, ios->io_comm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + + /* Get length of iobuffer for each field on this + * task (all fields are the same length). */ + if ((mpierr = MPI_Recv(&rlen, 1, MPI_OFFSET, rtask, rtask, ios->io_comm, + &status))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((3, "received rlen = %d", rlen)); + + /* Get the number of regions, the start/count + * values for all regions, and the data buffer. 
*/ + if (rlen > 0) + { + if ((mpierr = MPI_Recv(&rregions, 1, MPI_INT, rtask, rtask + ios->num_iotasks, + ios->io_comm, &status))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Recv(tmp_start, rregions * fndims, MPI_OFFSET, rtask, + rtask + 2 * ios->num_iotasks, ios->io_comm, &status))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Recv(tmp_count, rregions * fndims, MPI_OFFSET, rtask, + rtask + 3 * ios->num_iotasks, ios->io_comm, &status))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Recv(iobuf, nvars * rlen, iodesc->basetype, rtask, + rtask + 4 * ios->num_iotasks, ios->io_comm, &status))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((3, "received data rregions = %d fndims = %d", rregions, fndims)); + } + } + else /* task 0 */ + { + rlen = llen; + rregions = maxregions; + } + LOG((3, "rtask = %d rlen = %d rregions = %d", rtask, rlen, rregions)); + + /* If there is data from this task, write it. */ + if (rlen > 0) + { + loffset = 0; + for (int regioncnt = 0; regioncnt < rregions; regioncnt++) + { + LOG((3, "writing data for region with regioncnt = %d", regioncnt)); + + /* Get the start/count arrays for this region. */ + for (int i = 0; i < fndims; i++) + { + start[i] = tmp_start[i + regioncnt * fndims]; + count[i] = tmp_count[i + regioncnt * fndims]; + LOG((3, "start[%d] = %d count[%d] = %d", i, start[i], i, count[i])); + } + + /* Process each variable in the buffer. */ + for (int nv = 0; nv < nvars; nv++) + { + LOG((3, "writing buffer var %d", nv)); + vdesc = file->varlist + vid[0]; + + /* Get a pointer to the correct part of the buffer. */ + bufptr = (void *)((char *)iobuf + iodesc->basetype_size * (nv * rlen + loffset)); + + /* If this var has an unlimited dim, set + * the start on that dim to the frame + * value for this variable. */ + if (vdesc->record >= 0) + { + if (fndims > 1 && iodesc->ndims < fndims && count[1] > 0) + { + count[0] = 1; + start[0] = frame[nv]; + } + else if (fndims == iodesc->ndims) + { + start[0] += vdesc->record; + } + } + + /* Call the netCDF functions to write the data. */ + if ((ierr = nc_put_vara(file->fh, vid[nv], start, count, bufptr))) + return check_netcdf2(ios, NULL, ierr, __FILE__, __LINE__); + + } /* next var */ + + /* Calculate the total size. */ + size_t tsize = 1; + for (int i = 0; i < fndims; i++) + tsize *= count[i]; + + /* Keep track of where we are in the buffer. */ + loffset += tsize; + + LOG((3, " at bottom of loop regioncnt = %d tsize = %d loffset = %d", regioncnt, + tsize, loffset)); + } /* next regioncnt */ + } /* endif (rlen > 0) */ + } /* next rtask */ + + return PIO_NOERR; +} + /** * Write a set of one or more aggregated arrays to output file in * serial mode. This function is called for netCDF classic and @@ -361,16 +663,13 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int * @param nvars the number of variables to be written with this * decomposition. * @param vid an array of the variable ids to be written - * @param iodesc_ndims the number of dimensions explicitly in the - * iodesc. - * @param basetype the basic type of the minimal data unit + * @param iodesc pointer to the decomposition info. * @param maxregions max number of blocks to be written from this * iotask. * @param firstregion pointer to the first element of a linked * list of region descriptions. May be NULL. * @param llen length of the iobuffer on this task for a single * field. 
- * @param num_aiotasks actual number of iotasks participating * @param iobuf the buffer to be written from this mpi task. May be * null. for example we have 8 ionodes and a distributed array with * global size 4, then at least 4 nodes will have a null iobuf. In @@ -379,48 +678,47 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int * less than blocksize*numiotasks then some iotasks will have a NULL * iobuf. * @param frame the record dimension for each of the nvars variables - * in iobuf. + * in iobuf. NULL if this iodesc contains non-record vars. * @return 0 for success, error code otherwise. * @ingroup PIO_write_darray */ -int pio_write_darray_multi_nc_serial(file_desc_t *file, int nvars, const int *vid, int iodesc_ndims, - MPI_Datatype basetype, int maxregions, io_region *firstregion, - PIO_Offset llen, int num_aiotasks, void *iobuf, - const int *frame) +int write_darray_multi_serial(file_desc_t *file, int nvars, const int *vid, + io_desc_t *iodesc, int fill, const int *frame) { iosystem_desc_t *ios; /* Pointer to io system information. */ var_desc_t *vdesc; /* Contains info about the variable. */ - int dsize; /* Size in bytes of one element of basetype. */ int fndims; /* Number of dims in the var in the file. */ - int tsize; /* Size of the MPI type, in bytes. */ - MPI_Status status; /* Recv status for MPI. */ int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ int ierr; /* Return code. */ /* Check inputs. */ - pioassert(file && file->iosystem && vid && vid[0] >= 0 && vid[0] <= PIO_MAX_VARS, - "invalid input", __FILE__, __LINE__); + pioassert(file && file->iosystem && file->varlist && vid && vid[0] >= 0 && + vid[0] <= PIO_MAX_VARS && iodesc, "invalid input", __FILE__, __LINE__); - LOG((1, "pio_write_darray_multi_nc_serial nvars = %d iodesc_ndims = %d basetype = %d " - "maxregions = %d llen = %d num_aiotasks = %d", nvars, iodesc_ndims, - basetype, maxregions, llen, num_aiotasks)); - -#ifdef TIMING - /* Start timing this function. */ - GPTLstart("PIO:write_darray_multi_nc_serial"); -#endif + LOG((1, "write_darray_multi_serial nvars = %d iodesc->ndims = %d iodesc->basetype = %d", + nvars, iodesc->ndims, iodesc->basetype)); /* Get the iosystem info. */ ios = file->iosystem; /* Get the var info. */ vdesc = file->varlist + vid[0]; + LOG((2, "vdesc record %d ndims %d nreqs %d ios->async = %d", vdesc->record, + vdesc->ndims, vdesc->nreqs, ios->async)); - LOG((2, "vdesc record %d ndims %d nreqs %d ios->async_interface = %d", vdesc->record, - vdesc->ndims, vdesc->nreqs, ios->async_interface)); + /* Set these differently for data and fill writing. */ + int num_regions = fill ? iodesc->maxfillregions: iodesc->maxregions; + io_region *region = fill ? iodesc->fillregion : iodesc->firstregion; + PIO_Offset llen = fill ? iodesc->holegridsize : iodesc->llen; + void *iobuf = fill ? vdesc->fillbuf : vdesc->iobuf; + +#ifdef TIMING + /* Start timing this function. */ + GPTLstart("PIO:write_darray_multi_nc_serial"); +#endif /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -444,220 +742,36 @@ int pio_write_darray_multi_nc_serial(file_desc_t *file, int nvars, const int *vi if ((ierr = PIOc_inq_varndims(file->pio_ncid, vid[0], &fndims))) return pio_err(ios, file, ierr, __FILE__, __LINE__); - /* Get the size of the type. 
*/ - if ((mpierr = MPI_Type_size(basetype, &tsize))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((3, "fndims = %d tsize = %d", fndims, tsize)); - /* Only IO tasks participate in this code. */ if (ios->ioproc) { - io_region *region; - void *bufptr; - size_t tmp_start[fndims * maxregions]; /* A start array for each region. */ - size_t tmp_count[fndims * maxregions]; /* A count array for each region. */ + size_t tmp_start[fndims * num_regions]; /* A start array for each region. */ + size_t tmp_count[fndims * num_regions]; /* A count array for each region. */ - LOG((3, "maxregions = %d", maxregions)); + LOG((3, "num_regions = %d", num_regions)); /* Fill the tmp_start and tmp_count arrays, which contain the * start and count arrays for all regions. */ - region = firstregion; - for (int regioncnt = 0; regioncnt < maxregions; regioncnt++) - { - /* Initialize the start/count arrays for this region to 0. */ - for (int i = 0; i < fndims; i++) - { - tmp_start[i + regioncnt * fndims] = 0; - tmp_count[i + regioncnt * fndims] = 0; - } - - if (region) - { - if (vdesc->record >= 0) - { - /* This is a record based multidimensional - * array. Copy start/count for non-record - * dimensions. */ - for (int i = fndims - iodesc_ndims; i < fndims; i++) - { - tmp_start[i + regioncnt * fndims] = region->start[i - (fndims - iodesc_ndims)]; - tmp_count[i + regioncnt * fndims] = region->count[i - (fndims - iodesc_ndims)]; - LOG((3, "tmp_start[%d] = %d tmp_count[%d] = %d", i + regioncnt * fndims, - tmp_start[i + regioncnt * fndims], i + regioncnt * fndims, - tmp_count[i + regioncnt * fndims])); - } - } - else - { - /* This is not a record based multidimensional array. */ - for (int i = 0; i < iodesc_ndims; i++) - { - tmp_start[i + regioncnt * fndims] = region->start[i]; - tmp_count[i + regioncnt * fndims] = region->count[i]; - LOG((3, "tmp_start[%d] = %d tmp_count[%d] = %d", i + regioncnt * fndims, - tmp_start[i + regioncnt * fndims], i + regioncnt * fndims, - tmp_count[i + regioncnt * fndims])); - } - } - - /* Move to next region. */ - region = region->next; - - } /* endif region */ - } /* next regioncnt */ + if ((ierr = find_all_start_count(region, num_regions, fndims, iodesc->ndims, vdesc, + tmp_start, tmp_count))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); /* Tasks other than 0 will send their data to task 0. */ if (ios->io_rank > 0) { - /* Do a handshake. */ - if ((mpierr = MPI_Recv(&ierr, 1, MPI_INT, 0, 0, ios->io_comm, &status))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - - /* Send local length of iobuffer for each field (all - * fields are the same length). */ - if ((mpierr = MPI_Send((void *)&llen, 1, MPI_OFFSET, 0, ios->io_rank, ios->io_comm))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((3, "sent llen = %d", llen)); - - /* Send the number of data regions, the start/count for - * all regions, and the data buffer with all the data. 
*/ - if (llen > 0) - { - if ((mpierr = MPI_Send((void *)&maxregions, 1, MPI_INT, 0, ios->io_rank + ios->num_iotasks, ios->io_comm))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Send(tmp_start, maxregions * fndims, MPI_OFFSET, 0, ios->io_rank + 2 * ios->num_iotasks, - ios->io_comm))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Send(tmp_count, maxregions * fndims, MPI_OFFSET, 0, ios->io_rank + 3 * ios->num_iotasks, - ios->io_comm))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Send(iobuf, nvars * llen, basetype, 0, ios->io_rank + 4 * ios->num_iotasks, ios->io_comm))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((3, "sent data for maxregions = %d", maxregions)); - } + /* Send the tmp_start and tmp_count arrays from this IO task + * to task 0. */ + if ((ierr = send_all_start_count(ios, iodesc, llen, num_regions, nvars, fndims, + tmp_start, tmp_count, iobuf))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); } else { /* Task 0 will receive data from all other IO tasks. */ - size_t rlen; /* Length of IO buffer on this task. */ - int rregions; /* Number of regions in buffer for this task. */ - size_t start[fndims], count[fndims]; - size_t loffset; - - /* Get the size of the MPI data type. */ - if ((mpierr = MPI_Type_size(basetype, &dsize))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((3, "dsize = %d", dsize)); - - /* For each of the other tasks that are using this task - * for IO. */ - for (int rtask = 0; rtask < ios->num_iotasks; rtask++) - { - /* From the remote tasks, we send information about - * the data regions. and also the data. */ - if (rtask) - { - /* handshake - tell the sending task I'm ready */ - if ((mpierr = MPI_Send(&ierr, 1, MPI_INT, rtask, 0, ios->io_comm))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - - /* Get length of iobuffer for each field on this - * task (all fields are the same length). */ - if ((mpierr = MPI_Recv(&rlen, 1, MPI_OFFSET, rtask, rtask, ios->io_comm, &status))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((3, "received rlen = %d", rlen)); - /* Get the number of regions, the start/count - * values for all regions, and the data buffer. */ - if (rlen > 0) - { - if ((mpierr = MPI_Recv(&rregions, 1, MPI_INT, rtask, rtask + ios->num_iotasks, - ios->io_comm, &status))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Recv(tmp_start, rregions * fndims, MPI_OFFSET, rtask, rtask + 2 * ios->num_iotasks, - ios->io_comm, &status))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Recv(tmp_count, rregions * fndims, MPI_OFFSET, rtask, rtask + 3 * ios->num_iotasks, - ios->io_comm, &status))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Recv(iobuf, nvars * rlen, basetype, rtask, rtask + 4 * ios->num_iotasks, ios->io_comm, - &status))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((3, "received data rregions = %d fndims = %d", rregions, fndims)); - } - } - else /* task 0 */ - { - rlen = llen; - rregions = maxregions; - } - LOG((3, "rtask = %d rlen = %d rregions = %d", rtask, rlen, rregions)); - - /* If there is data from this task, write it. */ - if (rlen > 0) - { - loffset = 0; - for (int regioncnt = 0; regioncnt < rregions; regioncnt++) - { - LOG((3, "writing data for region with regioncnt = %d", regioncnt)); - - /* Get the start/count arrays for this region. 
*/ - for (int i = 0; i < fndims; i++) - { - start[i] = tmp_start[i + regioncnt * fndims]; - count[i] = tmp_count[i + regioncnt * fndims]; - LOG((3, "start[%d] = %d count[%d] = %d", i, start[i], i, count[i])); - } - - /* Process each variable in the buffer. */ - for (int nv = 0; nv < nvars; nv++) - { - LOG((3, "writing buffer var %d", nv)); - - /* Get a pointer to the correct part of the buffer. */ - bufptr = (void *)((char *)iobuf + tsize * (nv * rlen + loffset)); - - /* If this var has an unlimited dim, set - * the start on that dim to the frame - * value for this variable. */ - if (vdesc->record >= 0) - { - if (fndims > 1 && iodesc_ndims < fndims && count[1] > 0) - { - count[0] = 1; - start[0] = frame[nv]; - } - else if (fndims == iodesc_ndims) - { - start[0] += vdesc->record; - } - } - - /* Call the netCDF functions to write the data. */ - ierr = nc_put_vara(file->fh, vid[nv], start, count, bufptr); - - if (ierr) - { - for (int i = 0; i < fndims; i++) - fprintf(stderr, "vid %d dim %d start %ld count %ld \n", vid[nv], i, - start[i], count[i]); - return check_netcdf(file, ierr, __FILE__, __LINE__); - } - } /* next var */ - - /* Calculate the total size. */ - size_t tsize = 1; - for (int i = 0; i < fndims; i++) - tsize *= count[i]; - - /* Keep track of where we are in the buffer. */ - loffset += tsize; - - LOG((3, " at bottom of loop regioncnt = %d tsize = %d loffset = %d", regioncnt, - tsize, loffset)); - } /* next regioncnt */ - } /* endif (rlen > 0) */ - } /* next rtask */ + if ((ierr = recv_and_write_data(file, vid, frame, iodesc, llen, num_regions, nvars, fndims, + tmp_start, tmp_count, iobuf))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); } } @@ -692,7 +806,6 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu var_desc_t *vdesc; /* Information about the variable. */ int ndims; /* Number of dims in decomposition. */ int fndims; /* Number of dims for this var in file. */ - int mpierr; /* Return code from MPI functions. */ int ierr; /* Return code from netCDF functions. */ /* Check inputs. */ @@ -729,7 +842,6 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu size_t count[fndims]; size_t tmp_bufsize = 1; void *bufptr; - int tsize; int rrlen = 0; PIO_Offset *startlist[iodesc->maxregions]; PIO_Offset *countlist[iodesc->maxregions]; @@ -739,10 +851,6 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu the basetype. */ region = iodesc->firstregion; - /* Get the size of the MPI type. */ - if ((mpierr = MPI_Type_size(iodesc->basetype, &tsize))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - /* ??? */ if (fndims > ndims) { @@ -771,7 +879,7 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu if (regioncnt == 0 || region == NULL) bufptr = iobuf; else - bufptr=(void *)((char *)iobuf + tsize * region->loffset); + bufptr=(void *)((char *)iobuf + iodesc->basetype_size * region->loffset); LOG((2, "%d %d %d", iodesc->llen - region->loffset, iodesc->llen, region->loffset)); @@ -932,17 +1040,12 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, size_t tmp_count[fndims * iodesc->maxregions]; size_t tmp_bufsize; void *bufptr; - int tsize; /* buffer is incremented by byte and loffset is in terms of the iodessc->basetype so we need to multiply by the size of the basetype. */ region = iodesc->firstregion; - /* Get the size of the MPI type. 
*/ - if ((mpierr = MPI_Type_size(iodesc->basetype, &tsize))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - if (fndims > ndims) { if (vdesc->record < 0) @@ -1074,7 +1177,7 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, for (int regioncnt = 0; regioncnt < maxregions; regioncnt++) { /* Get pointer where data should go. */ - bufptr = (void *)((char *)iobuf + tsize * loffset); + bufptr = (void *)((char *)iobuf + iodesc->basetype_size * loffset); regionsize = 1; /* ??? */ @@ -1151,7 +1254,9 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize) /* Find out the buffer usage. */ if ((ierr = ncmpi_inq_buffer_usage(file->fh, &usage))) - return pio_err(NULL, file, PIO_EBADID, __FILE__, __LINE__); + /* allow the buffer to be undefined */ + if (ierr != NC_ENULLABUF) + return pio_err(NULL, file, PIO_EBADID, __FILE__, __LINE__); /* If we are not forcing a flush, spread the usage to all IO * tasks. */ @@ -1223,6 +1328,7 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize) vdesc = file->varlist + i; if (vdesc->iobuf) { + LOG((3,"freeing variable buffer in flush_output_buffer")); brel(vdesc->iobuf); vdesc->iobuf = NULL; } @@ -1298,6 +1404,7 @@ void free_cn_buffer_pool(iosystem_desc_t *ios) { #if !PIO_USE_MALLOC LOG((2, "free_cn_buffer_pool CN_bpool = %d", CN_bpool)); + /* Note: it is possible that CN_bpool has been freed and set to NULL by bpool_free() */ if (CN_bpool) { cn_buffer_report(ios, false); @@ -1312,9 +1419,9 @@ void free_cn_buffer_pool(iosystem_desc_t *ios) /** * Flush the buffer. * - * @param ncid identifies the netCDF file - * @param wmb May be NULL, in which case function returns. - * @param flushtodisk + * @param ncid identifies the netCDF file. + * @param wmb pointer to the wmulti_buffer structure. + * @param flushtodisk if true, then flush data to disk. * @returns 0 for success, error code otherwise. * @ingroup PIO_write_darray */ @@ -1333,14 +1440,14 @@ int flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk) LOG((1, "flush_buffer ncid = %d flushtodisk = %d", ncid, flushtodisk)); /* If there are any variables in this buffer... */ - if (wmb->validvars > 0) + if (wmb->num_arrays > 0) { /* Write any data in the buffer. */ - ret = PIOc_write_darray_multi(ncid, wmb->vid, wmb->ioid, wmb->validvars, + ret = PIOc_write_darray_multi(ncid, wmb->vid, wmb->ioid, wmb->num_arrays, wmb->arraylen, wmb->data, wmb->frame, wmb->fillvalue, flushtodisk); - wmb->validvars = 0; + wmb->num_arrays = 0; /* Release the list of variable IDs. */ brel(wmb->vid); @@ -1368,11 +1475,11 @@ int flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk) } /** - * Compute the maximum aggregate number of bytes. + * Compute the maximum aggregate number of bytes. This is called by + * subset_rearrange_create() and box_rearrange_create(). * - * @param ios the IO system structure - * @param iodesc a pointer to the defined iodescriptor for the - * buffer. If NULL, function returns immediately. + * @param ios pointer to the IO system structure. + * @param iodesc a pointer to decomposition description. * @returns 0 for success, error code otherwise. */ int compute_maxaggregate_bytes(iosystem_desc_t *ios, io_desc_t *iodesc) @@ -1388,19 +1495,27 @@ int compute_maxaggregate_bytes(iosystem_desc_t *ios, io_desc_t *iodesc) LOG((2, "compute_maxaggregate_bytes iodesc->maxiobuflen = %d iodesc->ndof = %d", iodesc->maxiobuflen, iodesc->ndof)); + /* Determine the max bytes that can be held on IO task. 
*/ if (ios->ioproc && iodesc->maxiobuflen > 0) maxbytesoniotask = pio_buffer_size_limit / iodesc->maxiobuflen; + /* Determine the max bytes that can be held on computation task. */ if (ios->comp_rank >= 0 && iodesc->ndof > 0) maxbytesoncomputetask = pio_cnbuffer_limit / iodesc->ndof; + /* Take the min of the max IO and max comp bytes. */ maxbytes = min(maxbytesoniotask, maxbytesoncomputetask); LOG((2, "compute_maxaggregate_bytes maxbytesoniotask = %d maxbytesoncomputetask = %d", maxbytesoniotask, maxbytesoncomputetask)); + /* Get the min value of this on all tasks. */ + LOG((3, "before allreaduce maxbytes = %d", maxbytes)); if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &maxbytes, 1, MPI_INT, MPI_MIN, ios->union_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((3, "after allreaduce maxbytes = %d", maxbytes)); + + /* Remember the result. */ iodesc->maxbytes = maxbytes; return PIO_NOERR; diff --git a/src/externals/pio2/src/clib/pio_file.c b/src/externals/pio2/src/clib/pio_file.c index f799e25f57cc..c0523997cfd4 100644 --- a/src/externals/pio2/src/clib/pio_file.c +++ b/src/externals/pio2/src/clib/pio_file.c @@ -106,8 +106,10 @@ int PIOc_createfile(int iosysid, int *ncidp, int *iotype, const char *filename, return pio_err(ios, NULL, ret, __FILE__, __LINE__); /* Run this on all tasks if async is not in use, but only on - * non-IO tasks if async is in use. */ - if (!ios->async_interface || !ios->ioproc) + * non-IO tasks if async is in use. (Because otherwise, in async + * mode, set_fill would be called twice by each IO task, since + * PIOc_createfile() will already be called on each IO task.) */ + if (!ios->async || !ios->ioproc) { /* Set the fill mode to NOFILL. */ if ((ret = PIOc_set_fill(*ncidp, NC_NOFILL, NULL))) @@ -174,7 +176,7 @@ int PIOc_closefile(int ncid) /* Sync changes before closing on all tasks if async is not in * use, but only on non-IO tasks if async is in use. */ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) if (file->mode & PIO_WRITE) PIOc_sync(ncid); @@ -182,7 +184,7 @@ int PIOc_closefile(int ncid) * sends a msg to the pio_msg_handler running on the IO master and * waiting for a message. Then broadcast the ncid over the intercomm * to the IO tasks. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -264,7 +266,7 @@ int PIOc_deletefile(int iosysid, const char *filename) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); /* If async is in use, send message to IO master task. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -338,7 +340,7 @@ int PIOc_sync(int ncid) ios = file->iosystem; /* If async is in use, send message to IO master tasks. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -366,7 +368,9 @@ int PIOc_sync(int ncid) wmb = &file->buffer; while (wmb) { - if (wmb->validvars > 0) + /* If there are any data arrays waiting in the + * multibuffer, flush it. 
*/ + if (wmb->num_arrays > 0) flush_buffer(ncid, wmb, true); twmb = wmb; wmb = wmb->next; diff --git a/src/externals/pio2/src/clib/pio_get_nc.c b/src/externals/pio2/src/clib/pio_get_nc.c index e8d542114309..2cd85d8f9476 100644 --- a/src/externals/pio2/src/clib/pio_get_nc.c +++ b/src/externals/pio2/src/clib/pio_get_nc.c @@ -619,7 +619,7 @@ int PIOc_get_vara_longlong(int ncid, int varid, const PIO_Offset *start, */ int PIOc_get_var_text(int ncid, int varid, char *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_CHAR, buf); + return PIOc_get_var_tc(ncid, varid, NC_CHAR, buf); } /** @@ -635,7 +635,7 @@ int PIOc_get_var_text(int ncid, int varid, char *buf) */ int PIOc_get_var_uchar(int ncid, int varid, unsigned char *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_UBYTE, buf); + return PIOc_get_var_tc(ncid, varid, NC_UBYTE, buf); } /** @@ -651,7 +651,7 @@ int PIOc_get_var_uchar(int ncid, int varid, unsigned char *buf) */ int PIOc_get_var_schar(int ncid, int varid, signed char *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_BYTE, buf); + return PIOc_get_var_tc(ncid, varid, NC_BYTE, buf); } /** @@ -667,7 +667,7 @@ int PIOc_get_var_schar(int ncid, int varid, signed char *buf) */ int PIOc_get_var_ushort(int ncid, int varid, unsigned short *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_USHORT, buf); + return PIOc_get_var_tc(ncid, varid, NC_USHORT, buf); } /** @@ -683,7 +683,7 @@ int PIOc_get_var_ushort(int ncid, int varid, unsigned short *buf) */ int PIOc_get_var_short(int ncid, int varid, short *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_SHORT, buf); + return PIOc_get_var_tc(ncid, varid, NC_SHORT, buf); } /** @@ -699,7 +699,7 @@ int PIOc_get_var_short(int ncid, int varid, short *buf) */ int PIOc_get_var_uint(int ncid, int varid, unsigned int *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_UINT, buf); + return PIOc_get_var_tc(ncid, varid, NC_UINT, buf); } /** @@ -715,7 +715,7 @@ int PIOc_get_var_uint(int ncid, int varid, unsigned int *buf) */ int PIOc_get_var_int(int ncid, int varid, int *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_INT, buf); + return PIOc_get_var_tc(ncid, varid, NC_INT, buf); } /** @@ -731,7 +731,7 @@ int PIOc_get_var_int(int ncid, int varid, int *buf) */ int PIOc_get_var_long (int ncid, int varid, long *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, PIO_LONG_INTERNAL, buf); + return PIOc_get_var_tc(ncid, varid, PIO_LONG_INTERNAL, buf); } /** @@ -747,7 +747,7 @@ int PIOc_get_var_long (int ncid, int varid, long *buf) */ int PIOc_get_var_float(int ncid, int varid, float *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_FLOAT, buf); + return PIOc_get_var_tc(ncid, varid, NC_FLOAT, buf); } /** @@ -763,7 +763,7 @@ int PIOc_get_var_float(int ncid, int varid, float *buf) */ int PIOc_get_var_double(int ncid, int varid, double *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_DOUBLE, buf); + return PIOc_get_var_tc(ncid, varid, NC_DOUBLE, buf); } /** @@ -779,7 +779,7 @@ int PIOc_get_var_double(int ncid, int varid, double *buf) */ int PIOc_get_var_ulonglong(int ncid, int varid, unsigned long long *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_UINT64, buf); + return PIOc_get_var_tc(ncid, varid, NC_UINT64, buf); } /** @@ -795,7 +795,7 @@ int PIOc_get_var_ulonglong(int ncid, int varid, unsigned long long *buf) */ int PIOc_get_var_longlong(int ncid, int varid, long long *buf) { - 
return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_INT64, buf); + return PIOc_get_var_tc(ncid, varid, NC_INT64, buf); } /** @@ -1044,7 +1044,7 @@ int PIOc_get_var1_longlong(int ncid, int varid, const PIO_Offset *index, */ int PIOc_get_var(int ncid, int varid, void *buf) { - return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_NAT, buf); + return PIOc_get_var_tc(ncid, varid, NC_NAT, buf); } /** diff --git a/src/externals/pio2/src/clib/pio_getput_int.c b/src/externals/pio2/src/clib/pio_getput_int.c index 32f1968186f8..187f74d058d6 100644 --- a/src/externals/pio2/src/clib/pio_getput_int.c +++ b/src/externals/pio2/src/clib/pio_getput_int.c @@ -52,7 +52,7 @@ int PIOc_put_att_tc(int ncid, int varid, const char *name, nc_type atttype, /* Run these on all tasks if async is not in use, but only on * non-IO tasks if async is in use. */ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) { /* Get the length (in bytes) of the type in file. */ if ((ierr = PIOc_inq_type(ncid, atttype, NULL, &atttype_len))) @@ -70,7 +70,7 @@ int PIOc_put_att_tc(int ncid, int varid, const char *name, nc_type atttype, } /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -259,7 +259,7 @@ int PIOc_get_att_tc(int ncid, int varid, const char *name, nc_type memtype, void /* Run these on all tasks if async is not in use, but only on * non-IO tasks if async is in use. */ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) { /* Get the type and length of the attribute. */ if ((ierr = PIOc_inq_att(ncid, varid, name, &atttype, &attlen))) @@ -284,7 +284,7 @@ int PIOc_get_att_tc(int ncid, int varid, const char *name, nc_type memtype, void /* If async is in use, and this is not an IO task, bcast the * parameters and the attribute and type information we fetched. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -485,19 +485,19 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off { iosystem_desc_t *ios; /* Pointer to io system information. */ file_desc_t *file; /* Pointer to file information. */ - int ierr = PIO_NOERR; /* Return code from function calls. */ - int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ - int ndims; /* The number of dimensions in the variable. */ - PIO_Offset typelen; /* Size (in bytes) of the data type of data in buf. */ + int ndims; /* The number of dimensions in the variable. */ + PIO_Offset typelen; /* Size (in bytes) of the data type of data in buf. */ PIO_Offset num_elem = 1; /* Number of data elements in the buffer. */ + nc_type vartype; /* The type of the var we are reading from. */ char start_present = start ? true : false; char count_present = count ? true : false; char stride_present = stride ? true : false; - PIO_Offset *rstart = NULL, *rcount = NULL; - nc_type vartype; /* The type of the var we are reading from. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + int ierr; /* Return code. */ - LOG((1, "PIOc_get_vars_tc ncid = %d varid = %d start = %d count = %d " - "stride = %d xtype = %d", ncid, varid, start, count, stride, xtype)); + LOG((1, "PIOc_get_vars_tc ncid = %d varid = %d xtype = %d start_present = %d " + "count_present = %d stride_present = %d", ncid, varid, xtype, start_present, + count_present, stride_present)); /* Find the info about this file. 
*/ if ((ierr = pio_get_file(ncid, &file))) @@ -510,7 +510,7 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off /* Run these on all tasks if async is not in use, but only on * non-IO tasks if async is in use. */ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) { /* Get the type of this var. */ if ((ierr = PIOc_inq_vartype(ncid, varid, &vartype))) @@ -532,63 +532,20 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off /* Get the number of dims for this var. */ if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims))) return check_netcdf(file, ierr, __FILE__, __LINE__); + LOG((3, "ndims = %d", ndims)); - PIO_Offset dimlen[ndims]; - - /* If no count array was passed, we need to know the dimlens - * so we can calculate how many data elements are in the - * buf. */ - if (!count) - { - int dimid[ndims]; - - /* Get the dimids for this var. */ - if ((ierr = PIOc_inq_vardimid(ncid, varid, dimid))) - return check_netcdf(file, ierr, __FILE__, __LINE__); - - /* Get the length of each dimension. */ - for (int vd = 0; vd < ndims; vd++) - if ((ierr = PIOc_inq_dimlen(ncid, dimid[vd], &dimlen[vd]))) - return check_netcdf(file, ierr, __FILE__, __LINE__); - } - - /* Figure out the real start, count, and stride arrays. (The - * user may have passed in NULLs.) */ - /* Allocate memory for these arrays, now that we know ndims. */ - if (!(rstart = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - if (!(rcount = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - - PIO_Offset rstride[ndims]; - for (int vd = 0; vd < ndims; vd++) - { - rstart[vd] = start ? start[vd] : 0; - rcount[vd] = count ? count[vd] : dimlen[vd]; - rstride[vd] = stride ? stride[vd] : 1; - LOG((3, "rstart[%d] = %d rcount[%d] = %d rstride[%d] = %d", vd, - rstart[vd], vd, rcount[vd], vd, rstride[vd])); - } + /* Only scalar vars can pass NULL for start/count. */ + pioassert(ndims == 0 || (start && count), "need start/count", __FILE__, __LINE__); - /* How many elements in buf? */ + /* How many elements in buf? (For scalars, ndims is 0 and + * num_elem will remain 1). */ for (int vd = 0; vd < ndims; vd++) - num_elem *= rcount[vd]; + num_elem *= count[vd]; LOG((2, "PIOc_get_vars_tc num_elem = %d", num_elem)); - - /* Free tmp resources. */ - if (start_present) - free(rstart); - else - start = rstart; - - if (count_present) - free(rcount); - else - count = rcount; } /* If async is in use, and this is not an IO task, bcast the parameters. 
*/ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -606,8 +563,12 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off if (!mpierr) mpierr = MPI_Bcast(&ndims, 1, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) + mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr && start_present) mpierr = MPI_Bcast((PIO_Offset *)start, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm); if (!mpierr) + mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr && count_present) mpierr = MPI_Bcast((PIO_Offset *)count, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm); if (!mpierr) mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); @@ -619,9 +580,9 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off mpierr = MPI_Bcast(&num_elem, 1, MPI_OFFSET, ios->compmaster, ios->intercomm); if (!mpierr) mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, ios->compmaster, ios->intercomm); - LOG((2, "PIOc_get_vars_tc ncid = %d varid = %d ndims = %d " - "stride_present = %d xtype = %d num_elem = %d", ncid, varid, - ndims, stride_present, xtype, num_elem)); + LOG((2, "PIOc_get_vars_tc ncid = %d varid = %d ndims = %d start_present = %d " + "count_present = %d stride_present = %d xtype = %d num_elem = %d", ncid, varid, + ndims, start_present, count_present, stride_present, xtype, num_elem)); } /* Handle MPI errors. */ @@ -646,7 +607,10 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off #ifdef _PNETCDF if (file->iotype == PIO_IOTYPE_PNETCDF) { - ncmpi_begin_indep_data(file->fh); + LOG((2, "pnetcdf calling ncmpi_get_vars_*() file->fh = %d varid = %d", file->fh, varid)); + /* Turn on independent access for pnetcdf file. */ + if ((ierr = ncmpi_begin_indep_data(file->fh))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); /* Only the IO master does the IO, so we are not really * getting parallel IO here. */ @@ -678,8 +642,11 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off default: return pio_err(ios, file, PIO_EBADIOTYPE, __FILE__, __LINE__); } - }; - ncmpi_end_indep_data(file->fh); + } + + /* Turn off independent access for pnetcdf file. */ + if ((ierr = ncmpi_end_indep_data(file->fh))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); } #endif /* _PNETCDF */ @@ -746,15 +713,6 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off } } - if (!ios->async_interface || !ios->ioproc) - { - /* Free tmp start/count allocated to account for NULL start/counts */ - if (!start_present) - free(rstart); - if (!count_present) - free(rcount); - } - /* Broadcast and check the return code. */ if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) return check_mpi(file, mpierr, __FILE__, __LINE__); @@ -772,7 +730,8 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off } /** - * Get one value of a variable of any type. + * Get one value of a variable of any type. This is an internal + * function. * * This routine is called collectively by all tasks in the * communicator ios.union_comm. @@ -811,6 +770,74 @@ int PIOc_get_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype return PIOc_get_vars_tc(ncid, varid, index, count, NULL, xtype, buf); } +/** + * Get a complete variable of any type. This is an internal function. 
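A brief caller-side sketch (editorial addition, not part of this patch): it illustrates how the typed whole-variable getters rewired above behave after this change. The function name, ncid, variable ids, and shapes are hypothetical.

/* Both calls below now forward to PIOc_get_var_tc(), which passes NULL
 * start/count for a scalar (ndims == 0) and otherwise builds start = {0,...}
 * and count = {dimlen,...} internally before calling PIOc_get_vars_tc(). */
#include <pio.h>

int read_whole_vars(int ncid, int scalar_varid, int grid_varid,
                    double *scalar_val, double *grid_vals)
{
    int ierr;

    /* Scalar variable: NULL start/count are passed internally. */
    if ((ierr = PIOc_get_var_double(ncid, scalar_varid, scalar_val)))
        return ierr;

    /* N-dimensional variable: the full extent is read, so grid_vals
     * must be large enough to hold the whole variable. */
    return PIOc_get_var_double(ncid, grid_varid, grid_vals);
}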
+ * + * This routine is called collectively by all tasks in the + * communicator ios.union_comm. + * + * @param ncid identifies the netCDF file + * @param varid the variable ID number + * @param index an array of start indicies (must have same number of + * entries as variable has dimensions). If NULL, indices of 0 will be + * used. + * @param xtype the netcdf type of the variable. + * @param buf pointer that will get the data. + * @return PIO_NOERR on success, error code otherwise. + */ +int PIOc_get_var_tc(int ncid, int varid, nc_type xtype, void *buf) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + PIO_Offset *startp = NULL; /* Pointer to start array. */ + PIO_Offset *countp = NULL; /* Pointer to count array. */ + int ndims; /* The number of dimensions in the variable. */ + PIO_Offset my_start[PIO_MAX_DIMS]; + PIO_Offset dimlen[PIO_MAX_DIMS]; + int ierr; /* Return code from function calls. */ + + LOG((1, "PIOc_get_var_tc ncid = %d varid = %d xtype = %d", ncid, varid, + xtype)); + + /* Find the info about this file. We need this for error handling. */ + if ((ierr = pio_get_file(ncid, &file))) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + ios = file->iosystem; + + /* Find the number of dimensions. */ + if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + + /* Scalar vars (which have ndims == 0) should just pass NULLs for + * start/count. */ + if (ndims) + { + /* Find the dimension IDs. */ + int dimids[ndims]; + if ((ierr = PIOc_inq_vardimid(ncid, varid, dimids))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + + /* Find the dimension lengths. */ + for (int d = 0; d < ndims; d++) + if ((ierr = PIOc_inq_dimlen(ncid, dimids[d], &dimlen[d]))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + + /* Set up start array. */ + for (int d = 0; d < ndims; d++) + { + my_start[d] = 0; + LOG((3, "my_start[%d] = %d dimlen[%d] = %d", d, my_start[d], d, + dimlen[d])); + } + + /* Set the start/count arrays. */ + startp = my_start; + countp = dimlen; + } + + return PIOc_get_vars_tc(ncid, varid, startp, countp, NULL, xtype, buf); +} + /** * Internal PIO function which provides a type-neutral interface to * nc_put_vars. @@ -850,22 +877,22 @@ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off const PIO_Offset *stride, nc_type xtype, const void *buf) { iosystem_desc_t *ios; /* Pointer to io system information. */ - file_desc_t *file; /* Pointer to file information. */ - int ndims; /* The number of dimensions in the variable. */ + file_desc_t *file; /* Pointer to file information. */ + int ndims; /* The number of dimensions in the variable. */ PIO_Offset typelen; /* Size (in bytes) of the data type of data in buf. */ PIO_Offset num_elem = 1; /* Number of data elements in the buffer. */ - char start_present = start ? true : false; /* Is start non-NULL? */ - char count_present = count ? true : false; /* Is count non-NULL? */ - char stride_present = stride ? true : false; /* Is stride non-NULL? */ - PIO_Offset *rstart, *rcount, *rstride; + char start_present = start ? true : false; /* Is start non-NULL? */ + char count_present = count ? true : false; /* Is count non-NULL? */ + char stride_present = stride ? true : false; /* Is stride non-NULL? */ var_desc_t *vdesc; int *request; nc_type vartype; /* The type of the var we are reading from. */ int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. 
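PIOc_get_var_tc() above derives start = 0 and count = the dimension lengths itself and forwards to PIOc_get_vars_tc(), so whole-variable reads no longer need to build those arrays by hand. A minimal usage sketch follows; it is illustrative only, the helper name is hypothetical, and it assumes an open ncid, a PIO_DOUBLE variable, and a caller-provided buffer large enough for the whole variable.

#include <pio.h>

/* Illustrative only: read an entire variable without building
 * start/count by hand.  'vals' must be large enough to hold the
 * full variable (caller's responsibility in this sketch). */
static int read_whole_variable(int ncid, int varid, double *vals)
{
    /* Equivalent to setting start[d] = 0 and count[d] = dimlen[d]
     * for every dimension and calling PIOc_get_vars_tc() with a
     * NULL stride, as the function above does internally. */
    return PIOc_get_var_tc(ncid, varid, PIO_DOUBLE, vals);
}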
*/ int ierr; /* Return code from function calls. */ - LOG((1, "PIOc_put_vars_tc ncid = %d varid = %d start = %d count = %d " - "stride = %d xtype = %d", ncid, varid, start, count, stride, xtype)); + LOG((1, "PIOc_put_vars_tc ncid = %d varid = %d start_present = %d " + "count_present = %d stride_present = %d xtype = %d", ncid, varid, + start_present, count_present, stride_present, xtype)); /* Get file info. */ if ((ierr = pio_get_file(ncid, &file))) @@ -878,7 +905,7 @@ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off /* Run these on all tasks if async is not in use, but only on * non-IO tasks if async is in use. */ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) { /* Get the type of this var. */ if ((ierr = PIOc_inq_vartype(ncid, varid, &vartype))) @@ -903,69 +930,15 @@ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off LOG((2, "ndims = %d typelen = %d", ndims, typelen)); - PIO_Offset dimlen[ndims]; - - /* If no count array was passed, we need to know the dimlens - * so we can calculate how many data elements are in the - * buf. */ - if (!count) - { - int dimid[ndims]; - - /* Get the dimids for this var. */ - if ((ierr = PIOc_inq_vardimid(ncid, varid, dimid))) - return check_netcdf(file, ierr, __FILE__, __LINE__); - - /* Get the length of each dimension. */ + /* How many elements of data? If no count array was passed, + * this is a scalar. */ + if (count) for (int vd = 0; vd < ndims; vd++) - { - if ((ierr = PIOc_inq_dimlen(ncid, dimid[vd], &dimlen[vd]))) - return check_netcdf(file, ierr, __FILE__, __LINE__); - LOG((3, "dimlen[%d] = %d", vd, dimlen[vd])); - } - } - - /* Allocate memory for these arrays, now that we know ndims. */ - if (!(rstart = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - if (!(rcount = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - if (!(rstride = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - - /* Figure out the real start, count, and stride arrays. (The - * user may have passed in NULLs.) */ - for (int vd = 0; vd < ndims; vd++) - { - rstart[vd] = start ? start[vd] : 0; - rcount[vd] = count ? count[vd] : dimlen[vd]; - rstride[vd] = stride ? stride[vd] : 1; - LOG((3, "rstart[%d] = %d rcount[%d] = %d rstride[%d] = %d", vd, - rstart[vd], vd, rcount[vd], vd, rstride[vd])); - } - - /* How many elements in buf? */ - for (int vd = 0; vd < ndims; vd++) - num_elem *= rcount[vd]; - LOG((2, "PIOc_put_vars_tc num_elem = %d", num_elem)); - - /* Free tmp resources. */ - if (start_present) - free(rstart); - else - start = rstart; - - if (count_present) - free(rcount); - else - count = rcount; - - /* Only PNETCDF requires a non-NULL stride, realocate it later if needed */ - free(rstride); + num_elem *= count[vd]; } /* If async is in use, and this is not an IO task, bcast the parameters. 
*/ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -983,8 +956,12 @@ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off if (!mpierr) mpierr = MPI_Bcast(&ndims, 1, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) + mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr && start_present) mpierr = MPI_Bcast((PIO_Offset *)start, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm); if (!mpierr) + mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr && count_present) mpierr = MPI_Bcast((PIO_Offset *)count, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm); if (!mpierr) mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); @@ -1028,69 +1005,121 @@ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off #ifdef _PNETCDF if (file->iotype == PIO_IOTYPE_PNETCDF) { - PIO_Offset *fake_stride; - - if (!stride_present) + /* Scalars have to be handled differently. */ + if (ndims == 0) { - LOG((2, "stride not present")); - if (!(fake_stride = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - for (int d = 0; d < ndims; d++) - fake_stride[d] = 1; + /* This is a scalar var. */ + LOG((2, "pnetcdf writing scalar with ncmpi_put_vars_*() file->fh = %d varid = %d", + file->fh, varid)); + pioassert(!start && !count && !stride, "expected NULLs", __FILE__, __LINE__); + + /* Turn on independent access for pnetcdf file. */ + if ((ierr = ncmpi_begin_indep_data(file->fh))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + + /* Only the IO master does the IO, so we are not really + * getting parallel IO here. */ + if (ios->iomaster == MPI_ROOT) + { + switch(xtype) + { + case NC_BYTE: + ierr = ncmpi_put_vars_schar(file->fh, varid, start, count, stride, buf); + break; + case NC_CHAR: + ierr = ncmpi_put_vars_text(file->fh, varid, start, count, stride, buf); + break; + case NC_SHORT: + ierr = ncmpi_put_vars_short(file->fh, varid, start, count, stride, buf); + break; + case NC_INT: + ierr = ncmpi_put_vars_int(file->fh, varid, start, count, stride, buf); + break; + case PIO_LONG_INTERNAL: + ierr = ncmpi_put_vars_long(file->fh, varid, start, count, stride, buf); + break; + case NC_FLOAT: + ierr = ncmpi_put_vars_float(file->fh, varid, start, count, stride, buf); + break; + case NC_DOUBLE: + ierr = ncmpi_put_vars_double(file->fh, varid, start, count, stride, buf); + break; + default: + return pio_err(ios, file, PIO_EBADIOTYPE, __FILE__, __LINE__); + } + } + + /* Turn off independent access for pnetcdf file. */ + if ((ierr = ncmpi_end_indep_data(file->fh))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); } else - fake_stride = (PIO_Offset *)stride; - - LOG((2, "PIOc_put_vars_tc calling pnetcdf function")); - vdesc = file->varlist + varid; - if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0) - if (!(vdesc->request = realloc(vdesc->request, - sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK)))) - return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - request = vdesc->request + vdesc->nreqs; - LOG((2, "PIOc_put_vars_tc request = %d", vdesc->request)); - - /* Only the IO master actually does the call. */ - if (ios->iomaster == MPI_ROOT) { - switch(xtype) + /* This is not a scalar var. 
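The scalar branch above writes through pnetcdf's blocking ncmpi_put_vars_*() calls inside an independent-data-mode bracket, with only the I/O master issuing the put. A stripped-down sketch of that bracket follows; it is illustrative only, the helper name and the am_io_master flag are assumptions, it presumes a pnetcdf file already opened collectively by the I/O tasks and in data mode, and it mirrors the patch in passing NULLs for a scalar variable's start/count/stride.

#include <pnetcdf.h>

/* Illustrative only: the begin/end independent-data bracket used
 * above for scalar puts. */
static int write_scalar_indep(int ncid, int varid, int am_io_master, double val)
{
    int ierr = 0, ierr2;

    /* All I/O tasks switch the file into independent data mode... */
    if ((ierr = ncmpi_begin_indep_data(ncid)))
        return ierr;

    /* ...but only the I/O master actually issues the write.  For a
     * scalar variable the start/count/stride arguments carry no
     * information, so NULLs are passed, as in the patch. */
    if (am_io_master)
        ierr = ncmpi_put_vars_double(ncid, varid, NULL, NULL, NULL, &val);

    /* Leave independent mode on every task, even after an error. */
    ierr2 = ncmpi_end_indep_data(ncid);
    return ierr ? ierr : ierr2;
}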
*/ + PIO_Offset *fake_stride; + + if (!stride_present) { - case NC_BYTE: - ierr = ncmpi_bput_vars_schar(file->fh, varid, start, count, fake_stride, buf, request); - break; - case NC_CHAR: - ierr = ncmpi_bput_vars_text(file->fh, varid, start, count, fake_stride, buf, request); - break; - case NC_SHORT: - ierr = ncmpi_bput_vars_short(file->fh, varid, start, count, fake_stride, buf, request); - break; - case NC_INT: - ierr = ncmpi_bput_vars_int(file->fh, varid, start, count, fake_stride, buf, request); - break; - case PIO_LONG_INTERNAL: - ierr = ncmpi_bput_vars_long(file->fh, varid, start, count, fake_stride, buf, request); - break; - case NC_FLOAT: - ierr = ncmpi_bput_vars_float(file->fh, varid, start, count, fake_stride, buf, request); - break; - case NC_DOUBLE: - ierr = ncmpi_bput_vars_double(file->fh, varid, start, count, fake_stride, buf, request); - break; - default: - return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + LOG((2, "stride not present")); + if (!(fake_stride = malloc(ndims * sizeof(PIO_Offset)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + for (int d = 0; d < ndims; d++) + fake_stride[d] = 1; } - LOG((2, "PIOc_put_vars_tc io_rank 0 done with pnetcdf call, ierr=%d", ierr)); - } - else - *request = PIO_REQ_NULL; + else + fake_stride = (PIO_Offset *)stride; + + LOG((2, "PIOc_put_vars_tc calling pnetcdf function")); + vdesc = &file->varlist[varid]; + if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0) + if (!(vdesc->request = realloc(vdesc->request, + sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + request = vdesc->request + vdesc->nreqs; + LOG((2, "PIOc_put_vars_tc request = %d", vdesc->request)); + + /* Only the IO master actually does the call. */ + if (ios->iomaster == MPI_ROOT) + { + switch(xtype) + { + case NC_BYTE: + ierr = ncmpi_bput_vars_schar(file->fh, varid, start, count, fake_stride, buf, request); + break; + case NC_CHAR: + ierr = ncmpi_bput_vars_text(file->fh, varid, start, count, fake_stride, buf, request); + break; + case NC_SHORT: + ierr = ncmpi_bput_vars_short(file->fh, varid, start, count, fake_stride, buf, request); + break; + case NC_INT: + ierr = ncmpi_bput_vars_int(file->fh, varid, start, count, fake_stride, buf, request); + break; + case PIO_LONG_INTERNAL: + ierr = ncmpi_bput_vars_long(file->fh, varid, start, count, fake_stride, buf, request); + break; + case NC_FLOAT: + ierr = ncmpi_bput_vars_float(file->fh, varid, start, count, fake_stride, buf, request); + break; + case NC_DOUBLE: + ierr = ncmpi_bput_vars_double(file->fh, varid, start, count, fake_stride, buf, request); + break; + default: + return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + } + LOG((2, "PIOc_put_vars_tc io_rank 0 done with pnetcdf call, ierr=%d", ierr)); + } + else + *request = PIO_REQ_NULL; - vdesc->nreqs++; - flush_output_buffer(file, false, 0); - LOG((2, "PIOc_put_vars_tc flushed output buffer")); + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + LOG((2, "PIOc_put_vars_tc flushed output buffer")); - /* Free malloced resources. */ - if (!stride_present) - free(fake_stride); + /* Free malloced resources. 
*/ + if (!stride_present) + free(fake_stride); + } /* endif ndims == 0 */ } #endif /* _PNETCDF */ @@ -1161,15 +1190,6 @@ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off } } - if (!ios->async_interface || !ios->ioproc) - { - /* Free tmp start/count allocated to account for NULL start/counts */ - if (!start_present) - free(rstart); - if (!count_present) - free(rcount); - } - /* Broadcast and check the return code. */ if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) return check_mpi(file, mpierr, __FILE__, __LINE__); @@ -1232,3 +1252,78 @@ int PIOc_put_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype return PIOc_put_vars_tc(ncid, varid, index, count, NULL, xtype, op); } + +/** + * Internal PIO function which provides a type-neutral interface to + * nc_put_var calls. + * + * Users should not call this function directly. Instead, call one of + * the derived functions, depending on the type of data you are + * writing: PIOc_put_var_text(), PIOc_put_var_uchar(), + * PIOc_put_var_schar(), PIOc_put_var_ushort(), + * PIOc_put_var_short(), PIOc_put_var_uint(), PIOc_put_var_int(), + * PIOc_put_var_long(), PIOc_put_var_float(), + * PIOc_put_var_longlong(), PIOc_put_var_double(), + * PIOc_put_var_ulonglong(). + * + * This routine is called collectively by all tasks in the + * communicator ios.union_comm. + * + * @param ncid identifies the netCDF file + * @param varid the variable ID number + * @param xtype the netCDF type of the data being passed in buf. Data + * will be automatically covnerted from this type to the type of the + * variable being written to. + * @param op pointer to the data to be written. + * + * @return PIO_NOERR on success, error code otherwise. + */ +int PIOc_put_var_tc(int ncid, int varid, nc_type xtype, const void *op) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + PIO_Offset *startp = NULL; /* Pointer to start array. */ + PIO_Offset *countp = NULL; /* Pointer to count array. */ + PIO_Offset start[PIO_MAX_DIMS]; + PIO_Offset count[PIO_MAX_DIMS]; + int ndims; /* The number of dimensions in the variable. */ + int ierr; /* Return code from function calls. */ + + LOG((1, "PIOc_put_var_tc ncid = %d varid = %d xtype = %d", ncid, + varid, xtype)); + + /* Find the info about this file. We need this for error handling. */ + if ((ierr = pio_get_file(ncid, &file))) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + ios = file->iosystem; + + /* Find the number of dimensions. */ + if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + + /* Scalar vars (which have ndims == 0) should just pass NULLs for + * start/count. */ + if (ndims) + { + int dimid[ndims]; + + /* Set up start array. */ + for (int d = 0; d < ndims; d++) + start[d] = 0; + + /* Get the dimids for this var. */ + if ((ierr = PIOc_inq_vardimid(ncid, varid, dimid))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Count array are the dimlens. */ + for (int d = 0; d < ndims; d++) + if ((ierr = PIOc_inq_dimlen(ncid, dimid[d], &count[d]))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + + /* Set the array pointers. 
*/ + startp = start; + countp = count; + } + + return PIOc_put_vars_tc(ncid, varid, startp, countp, NULL, xtype, op); +} diff --git a/src/externals/pio2/src/clib/pio_internal.h b/src/externals/pio2/src/clib/pio_internal.h index e1c891c761e0..9f1a4a18d5db 100644 --- a/src/externals/pio2/src/clib/pio_internal.h +++ b/src/externals/pio2/src/clib/pio_internal.h @@ -12,6 +12,15 @@ #include +/* These are the sizes of types in netCDF files. Do not replace these + * constants with sizeof() calls for C types. They are not the + * same. Even on a system where sizeof(short) is 4, the size of a + * short in a netCDF file is 2 bytes. */ +#define NETCDF_CHAR_SIZE 1 +#define NETCDF_SHORT_SIZE 2 +#define NETCDF_INT_FLOAT_SIZE 4 +#define NETCDF_DOUBLE_INT64_SIZE 8 + /* It seems that some versions of openmpi fail to define * MPI_OFFSET. */ #ifdef OMPI_OFFSET_DATATYPE @@ -123,8 +132,8 @@ extern "C" { int check_netcdf2(iosystem_desc_t *ios, file_desc_t *file, int status, const char *fname, int line); - /* Find the MPI type that matches a PIO type. */ - int find_mpi_type(int pio_type, MPI_Datatype *mpi_type); + /* Given PIO type, find MPI type and type size. */ + int find_mpi_type(int pio_type, MPI_Datatype *mpi_type, int *type_size); /* Check whether an IO type is valid for this build. */ int iotype_is_valid(int iotype); @@ -135,30 +144,59 @@ extern "C" { /* Assert that an expression is true. */ void pioassert(bool exp, const char *msg, const char *fname, int line); - int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_procs, - int myiorank, PIO_Offset *start, PIO_Offset *kount); + /* Compute start and count values for each io task for a decomposition. */ + int CalcStartandCount(int pio_type, int ndims, const int *gdims, int num_io_procs, + int myiorank, PIO_Offset *start, PIO_Offset *count, int *num_aiotasks); + + /* Completes the mapping for the box rearranger. */ + int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, const int *dest_ioproc, + const PIO_Offset *dest_ioindex); + + /* Create the MPI communicators needed by the subset rearranger. */ + int default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc); /* Check return from MPI function and print error message. */ void CheckMPIReturn(int ierr, const char *file, int line); /* Like MPI_Alltoallw(), but with flow control. */ - int pio_swapm(void *sndbuf, int *sndlths, int *sdispls, MPI_Datatype *stypes, - void *rcvbuf, int *rcvlths, int *rdispls, MPI_Datatype *rtypes, - MPI_Comm comm, bool handshake, bool isend, int max_requests); + int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes, + void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes, + MPI_Comm comm, rearr_comm_fc_opt_t *fc); long long lgcd_array(int nain, long long* ain); void PIO_Offset_size(MPI_Datatype *dtype, int *tsize); PIO_Offset GCDblocksize(int arrlen, const PIO_Offset *arr_in); - /* Initialize the rearranger options. */ - void init_rearr_opts(iosystem_desc_t *iosys); + /* Convert an index into dimension values. */ + void idx_to_dim_list(int ndims, const int *gdims, PIO_Offset idx, PIO_Offset *dim_list); + + /* Convert a global coordinate value into a local array index. */ + PIO_Offset coord_to_lindex(int ndims, const PIO_Offset *lcoord, const PIO_Offset *count); + + /* Determine whether fill values are needed. 
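The size macros added to pio_internal.h above encode the netCDF external (on-disk) type sizes, which are fixed by the file format and need not match the host's C type sizes. The small mapping below only illustrates that point; the helper name is hypothetical and not part of the library.

#include <netcdf.h>
#include "pio_internal.h"   /* for the NETCDF_*_SIZE macros added above */

/* Illustrative only: map a classic netCDF external type to its size
 * in the file.  sizeof(short) or sizeof(long) on the host may differ
 * from these values, which is why sizeof() must not be used here. */
static int netcdf_external_size(nc_type xtype)
{
    switch (xtype)
    {
    case NC_BYTE:
    case NC_CHAR:
        return NETCDF_CHAR_SIZE;         /* 1 byte in the file */
    case NC_SHORT:
        return NETCDF_SHORT_SIZE;        /* 2 bytes in the file */
    case NC_INT:
    case NC_FLOAT:
        return NETCDF_INT_FLOAT_SIZE;    /* 4 bytes in the file */
    case NC_DOUBLE:
        return NETCDF_DOUBLE_INT64_SIZE; /* 8 bytes in the file */
    default:
        return -1;                       /* not handled in this sketch */
    }
}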
*/ + int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gsize, + const PIO_Offset *compmap); + + /* Set start and count so that they describe the first region in map.*/ + PIO_Offset find_region(int ndims, const int *gdims, int maplen, const PIO_Offset *map, + PIO_Offset *start, PIO_Offset *count); + + /* Calculate start and count regions for the subset rearranger. */ + int get_regions(int ndims, const int *gdimlen, int maplen, const PIO_Offset *map, + int *maxregions, io_region *firstregion); + + /* Expand a region along dimension dim, by incrementing count[i] as + * much as possible, consistent with the map. */ + void expand_region(int dim, const int *gdims, int maplen, const PIO_Offset *map, + int region_size, int region_stride, const int *max_size, + PIO_Offset *count); /* Compare sets of rearranger options. */ bool cmp_rearr_opts(const rearr_opt_t *rearr_opts, const rearr_opt_t *exp_rearr_opts); - /* Reset rearranger opts in iosystem to valid values. */ - void check_and_reset_rearr_opts(iosystem_desc_t *ios); + /* Check and reset, if needed, rearranger opts to default values. */ + int check_and_reset_rearr_opts(rearr_opt_t *rearr_opt); /* Compare rearranger flow control options. */ bool cmp_rearr_comm_fc_opts(const rearr_comm_fc_opt_t *opt, @@ -178,15 +216,23 @@ extern "C" { int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, void *rbuf); /* Move data from compute tasks to IO tasks. */ - int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, void *rbuf, int nvars); + int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, void *rbuf, + int nvars); /* Allocate and initialize storage for decomposition information. */ int malloc_iodesc(iosystem_desc_t *ios, int piotype, int ndims, io_desc_t **iodesc); void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc); + /* Flush contents of multi-buffer to disk. */ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize); + + /* Compute the size that the IO tasks will need to hold the data. */ int compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc); - io_region *alloc_region(int ndims); + + /* Allocation memory for a data region. */ + int alloc_region2(iosystem_desc_t *ios, int ndims, io_region **region); + + /* Delete an entry from the lost of open IO systems. */ int pio_delete_iosystem_from_list(int piosysid); /* Find greatest commond divisor. */ @@ -211,7 +257,7 @@ extern "C" { /* Create the derived MPI datatypes used for comp2io and io2comp * transfers. */ - int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, PIO_Offset dlen, const PIO_Offset *mindex, + int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, const PIO_Offset *mindex, const int *mcount, int *mfrom, MPI_Datatype *mtype); int compare_offsets(const void *a, const void *b) ; @@ -230,8 +276,9 @@ extern "C" { int compute_maxaggregate_bytes(iosystem_desc_t *ios, io_desc_t *iodesc); - /* Announce a memory error with bget memory, and die. */ - void piomemerror(iosystem_desc_t *ios, size_t req, char *fname, int line); + /* Compute an element of start/count arrays. */ + void compute_one_dim(int gdim, int ioprocs, int rank, PIO_Offset *start, + PIO_Offset *count); /* Check the return code from an MPI function call. 
*/ int check_mpi(file_desc_t *file, int mpierr, const char *filename, int line); @@ -249,10 +296,8 @@ extern "C" { const int *frame); /* Write aggregated arrays to file using serial I/O (netCDF-3/netCDF-4 serial) */ - int pio_write_darray_multi_nc_serial(file_desc_t *file, int nvars, const int *vid, int iodesc_ndims, - MPI_Datatype basetype, int maxregions, io_region *firstregion, - PIO_Offset llen, int num_aiotasks, void *iobuf, - const int *frame); + int write_darray_multi_serial(file_desc_t *file, int nvars, const int *vid, + io_desc_t *iodesc, int fill, const int *frame); int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobuf); int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobuf); @@ -269,13 +314,16 @@ extern "C" { const PIO_Offset *stride, nc_type xtype, void *buf); int PIOc_get_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype, void *buf); + int PIOc_get_var_tc(int ncid, int varid, nc_type xtype, void *buf); + /* Generalized put functions. */ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, nc_type xtype, const void *buf); int PIOc_put_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype, const void *op); - + int PIOc_put_var_tc(int ncid, int varid, nc_type xtype, const void *op); + /* An internal replacement for a function pnetcdf does not * have. */ int pioc_pnetcdf_inq_type(int ncid, nc_type xtype, char *name, @@ -289,9 +337,9 @@ extern "C" { void pio_finalize_logging(void ); /* Write a netCDF decomp file. */ - int pioc_write_nc_decomp_int(int iosysid, const char *filename, int cmode, int ndims, int *global_dimlen, - int num_tasks, int *task_maplen, int *map, const char *title, - const char *history, int fortran_order); + int pioc_write_nc_decomp_int(iosystem_desc_t *ios, const char *filename, int cmode, int ndims, + int *global_dimlen, int num_tasks, int *task_maplen, int *map, + const char *title, const char *history, int fortran_order); /* Read a netCDF decomp file. */ int pioc_read_nc_decomp_int(int iosysid, const char *filename, int *ndims, int **global_dimlen, diff --git a/src/externals/pio2/src/clib/pio_lists.c b/src/externals/pio2/src/clib/pio_lists.c index 72e774715bc5..df79302cfd19 100644 --- a/src/externals/pio2/src/clib/pio_lists.c +++ b/src/externals/pio2/src/clib/pio_lists.c @@ -115,8 +115,14 @@ int pio_delete_file_from_list(int ncid) if (current_file == cfile) current_file = pfile; + /* Free any fill values that were allocated. */ + for (int v = 0; v < PIO_MAX_VARS; v++) + if (cfile->varlist[v].fillvalue) + free(cfile->varlist[v].fillvalue); + /* Free the memory used for this file. */ free(cfile); + return PIO_NOERR; } pfile = cfile; diff --git a/src/externals/pio2/src/clib/pio_msg.c b/src/externals/pio2/src/clib/pio_msg.c index 75d2a8bfea4f..b8ca0e9917b0 100644 --- a/src/externals/pio2/src/clib/pio_msg.c +++ b/src/externals/pio2/src/clib/pio_msg.c @@ -675,16 +675,18 @@ int put_vars_handler(iosystem_desc_t *ios) { int ncid; int varid; - PIO_Offset typelen; /** Length (in bytes) of this type. */ - nc_type xtype; /** Type of the data being written. */ - char stride_present; /** Zero if user passed a NULL stride. */ + PIO_Offset typelen; /* Length (in bytes) of this type. */ + nc_type xtype; /* Type of the data being written. */ + char start_present; /* Zero if user passed a NULL start. */ + char count_present; /* Zero if user passed a NULL count. 
*/ + char stride_present; /* Zero if user passed a NULL stride. */ PIO_Offset *startp = NULL; PIO_Offset *countp = NULL; PIO_Offset *stridep = NULL; - int ndims; /** Number of dimensions. */ - void *buf; /** Buffer for data storage. */ - PIO_Offset num_elem; /** Number of data elements in the buffer. */ - int mpierr; /** Error code from MPI function calls. */ + int ndims; /* Number of dimensions. */ + void *buf; /* Buffer for data storage. */ + PIO_Offset num_elem; /* Number of data elements in the buffer. */ + int mpierr; /* Error code from MPI function calls. */ LOG((1, "put_vars_handler")); assert(ios); @@ -701,11 +703,17 @@ int put_vars_handler(iosystem_desc_t *ios) /* Now we know how big to make these arrays. */ PIO_Offset start[ndims], count[ndims], stride[ndims]; - if ((mpierr = MPI_Bcast(start, ndims, MPI_OFFSET, 0, ios->intercomm))) + if ((mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if (start_present) + if ((mpierr = MPI_Bcast(start, ndims, MPI_OFFSET, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); LOG((1, "put_vars_handler getting start[0] = %d ndims = %d", start[0], ndims)); - if ((mpierr = MPI_Bcast(count, ndims, MPI_OFFSET, 0, ios->intercomm))) + if ((mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if (count_present) + if ((mpierr = MPI_Bcast(count, ndims, MPI_OFFSET, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); if (stride_present) @@ -718,16 +726,9 @@ int put_vars_handler(iosystem_desc_t *ios) if ((mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); LOG((1, "put_vars_handler ncid = %d varid = %d ndims = %d " - "stride_present = %d xtype = %d num_elem = %d typelen = %d", - ncid, varid, ndims, stride_present, xtype, num_elem, typelen)); - - for (int d = 0; d < ndims; d++) - { - LOG((2, "start[%d] = %d", d, start[d])); - LOG((2, "count[%d] = %d", d, count[d])); - if (stride_present) - LOG((2, "stride[%d] = %d", d, stride[d])); - } + "start_present = %d count_present = %d stride_present = %d xtype = %d " + "num_elem = %d typelen = %d", ncid, varid, ndims, start_present, count_present, + stride_present, xtype, num_elem, typelen)); /* Allocate room for our data. */ if (!(buf = malloc(num_elem * typelen))) @@ -735,17 +736,13 @@ int put_vars_handler(iosystem_desc_t *ios) /* Get the data. */ if ((mpierr = MPI_Bcast(buf, num_elem * typelen, MPI_BYTE, 0, ios->intercomm))) - { - free(buf); return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - } - - /* for (int e = 0; e < num_elem; e++) */ - /* LOG((2, "element %d = %d", e, ((int *)buf)[e])); */ /* Set the non-NULL pointers. */ - startp = start; - countp = count; + if (start_present) + startp = start; + if (count_present) + countp = count; if (stride_present) stridep = stride; @@ -819,6 +816,11 @@ int get_vars_handler(iosystem_desc_t *ios) int mpierr; PIO_Offset typelen; /** Length (in bytes) of this type. */ nc_type xtype; /** Type of the data being written. */ + PIO_Offset *start; + PIO_Offset *count; + PIO_Offset *stride; + char start_present; + char count_present; char stride_present; PIO_Offset *startp = NULL, *countp = NULL, *stridep = NULL; int ndims; /** Number of dimensions. 
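Both the sending side (PIOc_put_vars_tc) and the message handler above follow the same protocol: broadcast a one-byte presence flag, then broadcast the corresponding array only when the flag is set, so NULL start/count pointers never have to cross the intercomm. The self-contained MPI program below demonstrates that pattern in isolation; it is illustrative only and all names in it are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

/* Illustrative only: the "presence flag, then payload" broadcast
 * pattern.  Every rank makes the same sequence of MPI_Bcast calls;
 * the array broadcast is simply skipped when the flag says the
 * pointer was NULL on the root. */
int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int ndims = 3;
    long long *start = NULL;   /* stands in for PIO_Offset */
    char start_present = 0;

    if (rank == 0)
    {
        /* Root decides whether it has a start array at all. */
        if ((start = malloc(ndims * sizeof(long long))))
        {
            for (int d = 0; d < ndims; d++)
                start[d] = d;
            start_present = 1;
        }
    }

    /* Step 1: broadcast the presence flag. */
    MPI_Bcast(&start_present, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

    /* Step 2: broadcast the payload only if it exists. */
    if (start_present)
    {
        if (rank != 0)
            start = malloc(ndims * sizeof(long long));
        MPI_Bcast(start, ndims, MPI_LONG_LONG, 0, MPI_COMM_WORLD);
        printf("rank %d: start = %lld %lld %lld\n", rank,
               start[0], start[1], start[2]);
        free(start);
    }

    MPI_Finalize();
    return 0;
}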
*/ @@ -836,20 +838,33 @@ int get_vars_handler(iosystem_desc_t *ios) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(&ndims, 1, MPI_INT, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - - /* Now we know how big to make these arrays. */ - PIO_Offset start[ndims], count[ndims], stride[ndims]; - - if ((mpierr = MPI_Bcast(start, ndims, MPI_OFFSET, 0, ios->intercomm))) + if ((mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - LOG((1, "put_vars_handler getting start[0] = %d ndims = %d", start[0], ndims)); - if ((mpierr = MPI_Bcast(count, ndims, MPI_OFFSET, 0, ios->intercomm))) + if (start_present) + { + if (!(start = malloc(ndims * sizeof(PIO_Offset)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(start, ndims, MPI_OFFSET, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + } + if ((mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if (count_present) + { + if (!(count = malloc(ndims * sizeof(PIO_Offset)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(count, ndims, MPI_OFFSET, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + } if ((mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); if (stride_present) + { + if (!(stride = malloc(ndims * sizeof(PIO_Offset)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(stride, ndims, MPI_OFFSET, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + } if ((mpierr = MPI_Bcast(&xtype, 1, MPI_INT, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(&num_elem, 1, MPI_OFFSET, 0, ios->intercomm))) @@ -860,21 +875,17 @@ int get_vars_handler(iosystem_desc_t *ios) "stride_present = %d xtype = %d num_elem = %d typelen = %d", ncid, varid, ndims, stride_present, xtype, num_elem, typelen)); - for (int d = 0; d < ndims; d++) - { - LOG((2, "start[%d] = %d", d, start[d])); - LOG((2, "count[%d] = %d", d, count[d])); - if (stride_present) - LOG((2, "stride[%d] = %d", d, stride[d])); - } - /* Allocate room for our data. */ if (!(buf = malloc(num_elem * typelen))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); /* Set the non-NULL pointers. */ - startp = start; - countp = count; + if (start_present) + startp = start; + + if (count_present) + countp = count; + if (stride_present) stridep = stride; @@ -928,7 +939,15 @@ int get_vars_handler(iosystem_desc_t *ios) #endif /* _NETCDF4 */ } + /* Free resourses. */ free(buf); + if (start_present) + free(start); + if (count_present) + free(count); + if (stride_present) + free(stride); + LOG((1, "get_vars_handler succeeded!")); return PIO_NOERR; } @@ -1889,7 +1908,7 @@ int delete_file_handler(iosystem_desc_t *ios) int initdecomp_dof_handler(iosystem_desc_t *ios) { int iosysid; - int basetype; + int pio_type; int ndims; int maplen; int ioid; @@ -1910,7 +1929,7 @@ int initdecomp_dof_handler(iosystem_desc_t *ios) * task is broadcasting. 
*/ if ((mpierr = MPI_Bcast(&iosysid, 1, MPI_INT, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Bcast(&basetype, 1, MPI_INT, 0, ios->intercomm))) + if ((mpierr = MPI_Bcast(&pio_type, 1, MPI_INT, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(&ndims, 1, MPI_INT, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); @@ -1951,9 +1970,9 @@ int initdecomp_dof_handler(iosystem_desc_t *ios) if ((mpierr = MPI_Bcast(iocount, ndims, MPI_OFFSET, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - LOG((2, "initdecomp_dof_handler iosysid = %d basetype = %d ndims = %d maplen = %d " + LOG((2, "initdecomp_dof_handler iosysid = %d pio_type = %d ndims = %d maplen = %d " "rearranger_present = %d iostart_present = %d iocount_present = %d ", - iosysid, basetype, ndims, maplen, rearranger_present, iostart_present, iocount_present)); + iosysid, pio_type, ndims, maplen, rearranger_present, iostart_present, iocount_present)); if (rearranger_present) rearrangerp = &rearranger; @@ -1963,7 +1982,7 @@ int initdecomp_dof_handler(iosystem_desc_t *ios) iocountp = iocount; /* Call the function. */ - ret = PIOc_InitDecomp(iosysid, basetype, ndims, dims, maplen, compmap, &ioid, rearrangerp, + ret = PIOc_InitDecomp(iosysid, pio_type, ndims, dims, maplen, compmap, &ioid, rearrangerp, iostartp, iocountp); LOG((1, "PIOc_InitDecomp returned %d", ret)); diff --git a/src/externals/pio2/src/clib/pio_nc.c b/src/externals/pio2/src/clib/pio_nc.c index 8b7fb81ba646..00932bdfd64e 100644 --- a/src/externals/pio2/src/clib/pio_nc.c +++ b/src/externals/pio2/src/clib/pio_nc.c @@ -49,7 +49,7 @@ int PIOc_inq(int ncid, int *ndimsp, int *nvarsp, int *ngattsp, int *unlimdimidp) ios = file->iosystem; /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -89,9 +89,7 @@ int PIOc_inq(int ncid, int *ndimsp, int *nvarsp, int *ngattsp, int *unlimdimidp) #ifdef _PNETCDF if (file->iotype == PIO_IOTYPE_PNETCDF) { - LOG((2, "PIOc_inq calling ncmpi_inq unlimdimidp = %d", unlimdimidp)); ierr = ncmpi_inq(file->fh, ndimsp, nvarsp, ngattsp, unlimdimidp); - LOG((2, "PIOc_inq called ncmpi_inq")); if (unlimdimidp) LOG((2, "PIOc_inq returned from ncmpi_inq unlimdimid = %d", *unlimdimidp)); } @@ -205,7 +203,7 @@ int PIOc_inq_natts(int ncid, int *ngattsp) */ int PIOc_inq_unlimdim(int ncid, int *unlimdimidp) { - LOG((1, "PIOc_inq_unlimdim ncid = %d unlimdimidp = %d", ncid, unlimdimidp)); + LOG((1, "PIOc_inq_unlimdim ncid = %d", ncid)); return PIOc_inq(ncid, NULL, NULL, NULL, unlimdimidp); } @@ -234,7 +232,7 @@ int PIOc_inq_type(int ncid, nc_type xtype, char *name, PIO_Offset *sizep) ios = file->iosystem; /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -323,7 +321,7 @@ int PIOc_inq_format(int ncid, int *formatp) ios = file->iosystem; /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -402,7 +400,7 @@ int PIOc_inq_dim(int ncid, int dimid, char *name, PIO_Offset *lenp) ios = file->iosystem; /* If async is in use, and this is not an IO task, bcast the parameters. 
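The handler above ultimately just forwards the broadcast arguments to PIOc_InitDecomp(). For orientation, here is a minimal sketch of a direct call with a trivial 1-D block decomposition; it is illustrative only, the helper name and sizes are assumptions, it presumes an initialized I/O system, and it passes NULL for the optional rearranger/iostart/iocount arguments, just as the handler does when those were not supplied.

#include <pio.h>

/* Illustrative only: decompose a 1-D global array of length
 * 4 * ntasks so that each compute task owns a contiguous block of
 * four elements.  The compmap is 1-based, matching the convention
 * described for the rearranger code elsewhere in this patch. */
static int make_simple_decomp(int iosysid, int ntasks, int my_rank, int *ioidp)
{
    int gdimlen[1] = {4 * ntasks};
    PIO_Offset compmap[4];

    for (int i = 0; i < 4; i++)
        compmap[i] = my_rank * 4 + i + 1;   /* 1-based global offsets */

    return PIOc_InitDecomp(iosysid, PIO_INT, 1, gdimlen, 4, compmap,
                           ioidp, NULL, NULL, NULL);
}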
*/ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -543,7 +541,7 @@ int PIOc_inq_dimid(int ncid, const char *name, int *idp) LOG((1, "PIOc_inq_dimid ncid = %d name = %s", ncid, name)); /* If using async, and not an IO task, then send parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -629,10 +627,9 @@ int PIOc_inq_var(int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, if ((ierr = pio_get_file(ncid, &file))) return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); ios = file->iosystem; - LOG((2, "got file and iosystem")); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -689,8 +686,6 @@ int PIOc_inq_var(int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) { ierr = nc_inq_varndims(file->fh, varid, &ndims); - LOG((2, "file->fh = %d varid = %d xtypep = %d ndimsp = %d dimidsp = %d nattsp = %d", - file->fh, varid, xtypep, ndimsp, dimidsp, nattsp)); if (!ierr) { char my_name[NC_MAX_NAME + 1]; @@ -871,7 +866,7 @@ int PIOc_inq_varid(int ncid, const char *name, int *varidp) LOG((1, "PIOc_inq_varid ncid = %d name = %s", ncid, name)); - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -957,11 +952,10 @@ int PIOc_inq_att(int ncid, int varid, const char *name, nc_type *xtypep, if (!name || strlen(name) > NC_MAX_NAME) return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); - LOG((1, "PIOc_inq_att ncid = %d varid = %d xtpyep = %d lenp = %d", - ncid, varid, xtypep, lenp)); + LOG((1, "PIOc_inq_att ncid = %d varid = %d", ncid, varid)); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1086,7 +1080,7 @@ int PIOc_inq_attname(int ncid, int varid, int attnum, char *name) ios = file->iosystem; /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1180,7 +1174,7 @@ int PIOc_inq_attid(int ncid, int varid, const char *name, int *idp) LOG((1, "PIOc_inq_attid ncid = %d varid = %d name = %s", ncid, varid, name)); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1269,7 +1263,7 @@ int PIOc_rename_dim(int ncid, int dimid, const char *name) LOG((1, "PIOc_rename_dim ncid = %d dimid = %d name = %s", ncid, dimid, name)); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1354,7 +1348,7 @@ int PIOc_rename_var(int ncid, int varid, const char *name) LOG((1, "PIOc_rename_var ncid = %d varid = %d name = %s", ncid, varid, name)); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1443,7 +1437,7 @@ int PIOc_rename_att(int ncid, int varid, const char *name, ncid, varid, name, newname)); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1531,7 +1525,7 @@ int PIOc_del_att(int ncid, int varid, const char *name) LOG((1, "PIOc_del_att ncid = %d varid = %d name = %s", ncid, varid, name)); /* If async is in use, and this is not an IO task, bcast the parameters. 
*/ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1601,8 +1595,7 @@ int PIOc_set_fill(int ncid, int fillmode, int *old_modep) int ierr; /* Return code from function calls. */ int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI functions. */ - LOG((1, "PIOc_set_fill ncid = %d fillmode = %d old_modep = %d", ncid, fillmode, - old_modep)); + LOG((1, "PIOc_set_fill ncid = %d fillmode = %d", ncid, fillmode)); /* Find the info about this file. */ if ((ierr = pio_get_file(ncid, &file))) @@ -1610,7 +1603,7 @@ int PIOc_set_fill(int ncid, int fillmode, int *old_modep) ios = file->iosystem; /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1739,7 +1732,7 @@ int PIOc_def_dim(int ncid, const char *name, PIO_Offset len, int *idp) LOG((1, "PIOc_def_dim ncid = %d name = %s len = %d", ncid, name, len)); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1831,7 +1824,7 @@ int PIOc_def_var(int ncid, const char *name, nc_type xtype, int ndims, xtype, ndims)); /* If using async, and not an IO task, then send parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -1948,7 +1941,7 @@ int PIOc_def_var_fill(int ncid, int varid, int fill_mode, const void *fill_value /* Run this on all tasks if async is not in use, but only on * non-IO tasks if async is in use. Get the size of this vars * type. */ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) { if ((ierr = PIOc_inq_vartype(ncid, varid, &xtype))) return check_netcdf(file, ierr, __FILE__, __LINE__); @@ -1958,7 +1951,7 @@ int PIOc_def_var_fill(int ncid, int varid, int fill_mode, const void *fill_value LOG((2, "PIOc_def_var_fill type_size = %d", type_size)); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -2042,7 +2035,7 @@ int PIOc_def_var_fill(int ncid, int varid, int fill_mode, const void *fill_value * @param ncid the ncid of the open file, obtained from * PIOc_openfile() or PIOc_createfile(). * @param varid the variable ID. - * @param fill_mode a pointer to int that will get the fill + * @param no_fill a pointer to int that will get the fill * mode. Ignored if NULL (except with pnetcdf, which seg-faults with * NULL.) * @param fill_valuep pointer to space that gets the fill value for @@ -2050,7 +2043,7 @@ int PIOc_def_var_fill(int ncid, int varid, int fill_mode, const void *fill_value * @return PIO_NOERR for success, error code otherwise. * @ingroup PIO_inq_var_fill */ -int PIOc_inq_var_fill(int ncid, int varid, int *fill_mode, void *fill_valuep) +int PIOc_inq_var_fill(int ncid, int varid, int *no_fill, void *fill_valuep) { iosystem_desc_t *ios; /* Pointer to io system information. */ file_desc_t *file; /* Pointer to file information. */ @@ -2070,7 +2063,7 @@ int PIOc_inq_var_fill(int ncid, int varid, int *fill_mode, void *fill_valuep) /* Run this on all tasks if async is not in use, but only on * non-IO tasks if async is in use. Get the size of this vars * type. 
*/ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) { if ((ierr = PIOc_inq_vartype(ncid, varid, &xtype))) return check_netcdf(file, ierr, __FILE__, __LINE__); @@ -2080,12 +2073,12 @@ int PIOc_inq_var_fill(int ncid, int varid, int *fill_mode, void *fill_valuep) } /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { int msg = PIO_MSG_INQ_VAR_FILL; - char fill_mode_present = fill_mode ? true : false; + char no_fill_present = no_fill ? true : false; char fill_value_present = fill_valuep ? true : false; LOG((2, "sending msg type_size = %d", type_size)); @@ -2099,11 +2092,11 @@ int PIOc_inq_var_fill(int ncid, int varid, int *fill_mode, void *fill_valuep) if (!mpierr) mpierr = MPI_Bcast(&type_size, 1, MPI_OFFSET, ios->compmaster, ios->intercomm); if (!mpierr) - mpierr = MPI_Bcast(&fill_mode_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + mpierr = MPI_Bcast(&no_fill_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); if (!mpierr) mpierr = MPI_Bcast(&fill_value_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); - LOG((2, "PIOc_inq_var_fill ncid = %d varid = %d type_size = %lld fill_mode_present = %d fill_value_present = %d", - ncid, varid, type_size, fill_mode_present, fill_value_present)); + LOG((2, "PIOc_inq_var_fill ncid = %d varid = %d type_size = %lld no_fill_present = %d fill_value_present = %d", + ncid, varid, type_size, no_fill_present, fill_value_present)); } /* Handle MPI errors. */ @@ -2122,22 +2115,22 @@ int PIOc_inq_var_fill(int ncid, int varid, int *fill_mode, void *fill_valuep) /* If this is an IO task, then call the netCDF function. */ if (ios->ioproc) { - LOG((2, "calling inq_var_fill file->iotype = %d file->fh = %d varid = %d fill_mode = %d", - file->iotype, file->fh, varid, fill_mode)); + LOG((2, "calling inq_var_fill file->iotype = %d file->fh = %d varid = %d", + file->iotype, file->fh, varid)); if (file->iotype == PIO_IOTYPE_PNETCDF) { #ifdef _PNETCDF - ierr = ncmpi_inq_var_fill(file->fh, varid, fill_mode, fill_valuep); + ierr = ncmpi_inq_var_fill(file->fh, varid, no_fill, fill_valuep); #endif /* _PNETCDF */ } - else if (file->iotype == PIO_IOTYPE_NETCDF) + else if (file->iotype == PIO_IOTYPE_NETCDF && file->do_io) { /* Get the file-level fill mode. */ - if (fill_mode) + if (no_fill) { - ierr = nc_set_fill(file->fh, NC_NOFILL, fill_mode); + ierr = nc_set_fill(file->fh, NC_NOFILL, no_fill); if (!ierr) - ierr = nc_set_fill(file->fh, *fill_mode, NULL); + ierr = nc_set_fill(file->fh, *no_fill, NULL); } if (!ierr && fill_valuep) @@ -2183,7 +2176,7 @@ int PIOc_inq_var_fill(int ncid, int varid, int *fill_mode, void *fill_valuep) #ifdef _NETCDF4 /* The inq_var_fill is not supported in classic-only builds. */ if (file->do_io) - ierr = nc_inq_var_fill(file->fh, varid, fill_mode, fill_valuep); + ierr = nc_inq_var_fill(file->fh, varid, no_fill, fill_valuep); #endif /* _NETCDF */ } LOG((2, "after call to inq_var_fill, ierr = %d", ierr)); @@ -2196,8 +2189,8 @@ int PIOc_inq_var_fill(int ncid, int varid, int *fill_mode, void *fill_valuep) return check_netcdf(file, ierr, __FILE__, __LINE__); /* Broadcast results to all tasks. Ignore NULL parameters. 
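In the classic-netCDF branch above there is no direct per-variable fill inquiry available, so the code recovers the file-level fill mode by calling nc_set_fill() twice: the first call reports the current mode while temporarily switching to NC_NOFILL, and the second restores it. A standalone sketch of that query-by-set idiom follows; it is illustrative only, the helper name is hypothetical, and it assumes an open classic-format netCDF file.

#include <netcdf.h>

/* Illustrative only: recover the current file-level fill mode even
 * though classic netCDF has no direct "get fill mode" call. */
static int query_fill_mode(int ncid, int *fill_mode)
{
    int ierr;

    /* nc_set_fill() returns the previous mode through its last
     * argument while setting NC_NOFILL as a side effect. */
    if ((ierr = nc_set_fill(ncid, NC_NOFILL, fill_mode)))
        return ierr;

    /* Put the mode back the way we found it. */
    return nc_set_fill(ncid, *fill_mode, NULL);
}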
*/ - if (fill_mode) - if ((mpierr = MPI_Bcast(fill_mode, 1, MPI_INT, ios->ioroot, ios->my_comm))) + if (no_fill) + if ((mpierr = MPI_Bcast(no_fill, 1, MPI_INT, ios->ioroot, ios->my_comm))) check_mpi(file, mpierr, __FILE__, __LINE__); if (fill_valuep) if ((mpierr = MPI_Bcast(fill_valuep, type_size, MPI_CHAR, ios->ioroot, ios->my_comm))) diff --git a/src/externals/pio2/src/clib/pio_nc4.c b/src/externals/pio2/src/clib/pio_nc4.c index b68bba41050d..0b1cdd0e6b7e 100644 --- a/src/externals/pio2/src/clib/pio_nc4.c +++ b/src/externals/pio2/src/clib/pio_nc4.c @@ -48,7 +48,7 @@ int PIOc_def_var_deflate(int ncid, int varid, int shuffle, int deflate, return pio_err(ios, file, PIO_ENOTNC4, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -137,7 +137,7 @@ int PIOc_inq_var_deflate(int ncid, int varid, int *shufflep, int *deflatep, return pio_err(ios, file, PIO_ENOTNC4, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -251,13 +251,13 @@ int PIOc_def_var_chunking(int ncid, int varid, int storage, const PIO_Offset *ch /* Run this on all tasks if async is not in use, but only on * non-IO tasks if async is in use. Get the number of * dimensions. */ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims))) return check_netcdf(file, ierr, __FILE__, __LINE__); LOG((2, "PIOc_def_var_chunking first ndims = %d", ndims)); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -370,7 +370,7 @@ int PIOc_inq_var_chunking(int ncid, int varid, int *storagep, PIO_Offset *chunks /* Run these on all tasks if async is not in use, but only on * non-IO tasks if async is in use. */ - if (!ios->async_interface || !ios->ioproc) + if (!ios->async || !ios->ioproc) { /* Find the number of dimensions of this variable. */ if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims))) @@ -379,7 +379,7 @@ int PIOc_inq_var_chunking(int ncid, int varid, int *storagep, PIO_Offset *chunks } /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -495,7 +495,7 @@ int PIOc_def_var_endian(int ncid, int varid, int endian) return pio_err(ios, file, PIO_ENOTNC4, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -572,7 +572,7 @@ int PIOc_inq_var_endian(int ncid, int varid, int *endianp) return pio_err(ios, file, PIO_ENOTNC4, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -663,7 +663,7 @@ int PIOc_set_chunk_cache(int iosysid, int iotype, PIO_Offset size, PIO_Offset ne return pio_err(ios, NULL, PIO_ENOTNC4, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -761,7 +761,7 @@ int PIOc_get_chunk_cache(int iosysid, int iotype, PIO_Offset *sizep, PIO_Offset return pio_err(ios, NULL, PIO_ENOTNC4, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. 
*/ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -879,7 +879,7 @@ int PIOc_set_var_chunk_cache(int ncid, int varid, PIO_Offset size, PIO_Offset ne return pio_err(ios, file, PIO_ENOTNC4, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -966,7 +966,7 @@ int PIOc_get_var_chunk_cache(int ncid, int varid, PIO_Offset *sizep, PIO_Offset return pio_err(ios, file, PIO_ENOTNC4, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { diff --git a/src/externals/pio2/src/clib/pio_put_nc.c b/src/externals/pio2/src/clib/pio_put_nc.c index 74279c9871e3..7c74144da3b4 100644 --- a/src/externals/pio2/src/clib/pio_put_nc.c +++ b/src/externals/pio2/src/clib/pio_put_nc.c @@ -860,7 +860,7 @@ int PIOc_put_vara_double(int ncid, int varid, const PIO_Offset *start, */ int PIOc_put_var_text(int ncid, int varid, const char *op) { - return PIOc_put_vars_text(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_CHAR, op); } /** @@ -882,7 +882,7 @@ int PIOc_put_var_text(int ncid, int varid, const char *op) */ int PIOc_put_var_uchar(int ncid, int varid, const unsigned char *op) { - return PIOc_put_vars_uchar(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_UBYTE, op); } /** @@ -904,7 +904,7 @@ int PIOc_put_var_uchar(int ncid, int varid, const unsigned char *op) */ int PIOc_put_var_schar(int ncid, int varid, const signed char *op) { - return PIOc_put_vars_schar(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_BYTE, op); } /** @@ -926,7 +926,7 @@ int PIOc_put_var_schar(int ncid, int varid, const signed char *op) */ int PIOc_put_var_ushort(int ncid, int varid, const unsigned short *op) { - return PIOc_put_vars_tc(ncid, varid, NULL, NULL, NULL, NC_USHORT, op); + return PIOc_put_var_tc(ncid, varid, NC_USHORT, op); } /** @@ -948,7 +948,7 @@ int PIOc_put_var_ushort(int ncid, int varid, const unsigned short *op) */ int PIOc_put_var_short(int ncid, int varid, const short *op) { - return PIOc_put_vars_short(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_SHORT, op); } /** @@ -970,7 +970,7 @@ int PIOc_put_var_short(int ncid, int varid, const short *op) */ int PIOc_put_var_uint(int ncid, int varid, const unsigned int *op) { - return PIOc_put_vars_uint(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_UINT, op); } /** @@ -992,7 +992,7 @@ int PIOc_put_var_uint(int ncid, int varid, const unsigned int *op) */ int PIOc_put_var_int(int ncid, int varid, const int *op) { - return PIOc_put_vars_int(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_INT, op); } /** @@ -1014,7 +1014,7 @@ int PIOc_put_var_int(int ncid, int varid, const int *op) */ int PIOc_put_var_long(int ncid, int varid, const long *op) { - return PIOc_put_vars_long(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_LONG_INTERNAL, op); } /** @@ -1036,7 +1036,7 @@ int PIOc_put_var_long(int ncid, int varid, const long *op) */ int PIOc_put_var_float(int ncid, int varid, const float *op) { - return PIOc_put_vars_float(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_FLOAT, op); } /** @@ -1058,7 +1058,7 @@ int PIOc_put_var_float(int ncid, int varid, const float *op) */ int PIOc_put_var_ulonglong(int ncid, int 
varid, const unsigned long long *op) { - return PIOc_put_vars_ulonglong(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_UINT64, op); } /** @@ -1080,7 +1080,7 @@ int PIOc_put_var_ulonglong(int ncid, int varid, const unsigned long long *op) */ int PIOc_put_var_longlong(int ncid, int varid, const long long *op) { - return PIOc_put_vars_longlong(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_INT64, op); } /** @@ -1102,7 +1102,7 @@ int PIOc_put_var_longlong(int ncid, int varid, const long long *op) */ int PIOc_put_var_double(int ncid, int varid, const double *op) { - return PIOc_put_vars_double(ncid, varid, NULL, NULL, NULL, op); + return PIOc_put_var_tc(ncid, varid, PIO_DOUBLE, op); } /** @@ -1118,7 +1118,7 @@ int PIOc_put_var_double(int ncid, int varid, const double *op) */ int PIOc_put_var(int ncid, int varid, const void *op) { - return PIOc_put_vars_tc(ncid, varid, NULL, NULL, NULL, NC_NAT, op); + return PIOc_put_var_tc(ncid, varid, NC_NAT, op); } /** diff --git a/src/externals/pio2/src/clib/pio_rearrange.c b/src/externals/pio2/src/clib/pio_rearrange.c index 458e7e3844ff..74b27d45b06e 100644 --- a/src/externals/pio2/src/clib/pio_rearrange.c +++ b/src/externals/pio2/src/clib/pio_rearrange.c @@ -8,50 +8,40 @@ #include /** - * Internal library util function to initialize rearranger options. + * Convert a 1-D index into a coordinate value in an arbitrary + * dimension space. E.g., for index 4 into a array defined as a[3][2], + * will return 2,0. * - * @param iosys pointer to iosystem descriptor - */ -void init_rearr_opts(iosystem_desc_t *iosys) -{ - /* The old default for max pending requests was 64 - we no longer use it*/ - - /* Disable handshake /isend and set max_pend_req = 0 to turn of throttling */ - const rearr_comm_fc_opt_t def_coll_comm_fc_opts = { false, false, 0 }; - - assert(iosys); - - /* Default to coll - i.e., no flow control */ - iosys->rearr_opts.comm_type = PIO_REARR_COMM_COLL; - iosys->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; - iosys->rearr_opts.comm_fc_opts_comp2io = def_coll_comm_fc_opts; - iosys->rearr_opts.comm_fc_opts_io2comp = def_coll_comm_fc_opts; -} - -/** - * Convert an index into a list of dimensions. E.g., for index 4 into a - * array defined as a[3][2], will return 1 1. + * Sometimes (from box_rearranger_create()) this function is called + * with -1 for idx. Not clear if this makes sense. * - * @param ndims number of dimensions - * @param gdims - * @param idx - * @param dim_list - * @returns 0 on success, error code otherwise. + * @param ndims number of dimensions. + * @param gdimlen array of length ndims with the dimension sizes. + * @param idx the index to convert. This is the index into a 1-D array + * of data. + * @param dim_list array of length ndims that will get the dimensions + * corresponding to this index. */ -void idx_to_dim_list(int ndims, const int *gdims, PIO_Offset idx, +void idx_to_dim_list(int ndims, const int *gdimlen, PIO_Offset idx, PIO_Offset *dim_list) { - int i, curr_idx, next_idx; - curr_idx = idx; + /* Check inputs. */ + pioassert(ndims >= 0 && gdimlen && idx >= -1 && dim_list, "invalid input", + __FILE__, __LINE__); + LOG((2, "idx_to_dim_list ndims = %d idx = %d", ndims, idx)); /* Easiest to start from the right and move left. */ - for (i = ndims - 1; i >= 0; --i) + for (int i = ndims - 1; i >= 0; --i) { + int next_idx; + /* This way of doing div/mod is slightly faster than using "/" * and "%". 
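With the wrapper changes above, every PIOc_put_var_<type>() call reduces to a single PIOc_put_var_tc() call with the matching PIO type, which in turn builds the start/count arrays from the variable's dimension lengths. From the user's point of view nothing changes; a typical whole-variable write still looks like the sketch below (illustrative only; the helper name is hypothetical, and it assumes an open ncid in data mode and a buffer sized to the full variable).

#include <pio.h>

/* Illustrative only: write a complete double-precision variable.
 * Internally this now routes through PIOc_put_var_tc(). */
static int write_whole_variable(int ncid, int varid, const double *vals)
{
    return PIOc_put_var_double(ncid, varid, vals);
}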
*/ - next_idx = curr_idx / gdims[i]; - dim_list[i] = curr_idx - (next_idx * gdims[i]); - curr_idx = next_idx; + next_idx = idx / gdimlen[i]; + dim_list[i] = idx - (next_idx * gdimlen[i]); + LOG((3, "next_idx = %d idx = %d gdimlen[%d] = %d dim_list[%d] = %d", + next_idx, idx, i, gdimlen[i], i, dim_list[i])); + idx = next_idx; } } @@ -64,30 +54,34 @@ void idx_to_dim_list(int ndims, const int *gdims, PIO_Offset idx, * outermost dimension, until the region has been expanded as much as * possible along all dimensions. * - * @param dim - * @param gdims array of dimension ids - * @param maplen the length of the map - * @param map - * @param region_size - * @param region_stride - * @param max_size array of maximum sizes - * @param count array of counts - * @returns 0 on success, error code otherwise. + * Precondition: maplen >= region_size (thus loop runs at least + * once). + * + * @param dim the dimension number to start with. + * @param gdimlen array of global dimension lengths. + * @param maplen the length of the map. + * @param map array (length maplen) with the the 1-based compmap. + * @param region_size ??? + * @param region_stride amount incremented along dimension. + * @param max_size array of size dim + 1 that contains the maximum + * sizes along that dimension. + * @param count array of size dim + 1 that gets the new counts. */ -void expand_region(int dim, const int *gdims, int maplen, const PIO_Offset *map, +void expand_region(int dim, const int *gdimlen, int maplen, const PIO_Offset *map, int region_size, int region_stride, const int *max_size, PIO_Offset *count) { - int i, j, test_idx, expansion_done; - /* Precondition: maplen >= region_size (thus loop runs at least - * once). */ - /* Flag used to signal that we can no longer expand the region along dimension dim. */ - expansion_done = 0; + int expansion_done = 0; + + /* Check inputs. */ + pioassert(dim >= 0 && gdimlen && maplen >= 0 && map && region_size >= 0 && + maplen >= region_size && region_stride >= 0 && max_size && count, + "invalid input", __FILE__, __LINE__); /* Expand no greater than max_size along this dimension. */ - for (i = 1; i <= max_size[dim]; ++i) + for (int i = 1; i <= max_size[dim]; ++i) { /* Count so far is at least i. */ count[dim] = i; @@ -97,8 +91,10 @@ void expand_region(int dim, const int *gdims, int maplen, const PIO_Offset *map, Assuming monotonicity in the map, we could skip this for the innermost dimension, but it's necessary past that because the region does not necessarily comprise contiguous values. */ - for (j = 0; j < region_size; ++j) + for (int j = 0; j < region_size; j++) { + int test_idx; /* Index we are testing. */ + test_idx = j + i * region_size; /* If we have exhausted the map, or the map no longer matches, @@ -116,65 +112,91 @@ void expand_region(int dim, const int *gdims, int maplen, const PIO_Offset *map, /* Move on to next outermost dimension if there are more left, * else return. */ if (dim > 0) - expand_region(dim-1, gdims, maplen, map, region_size * count[dim], - region_stride * gdims[dim], max_size, count); + expand_region(dim - 1, gdimlen, maplen, map, region_size * count[dim], + region_stride * gdimlen[dim], max_size, count); } /** - * Set start and count so that they describe the first region in map. + * Set start and count so that they describe the first region in + * map. 
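For reference, the index-to-coordinate conversion documented for idx_to_dim_list() is plain row-major arithmetic. A minimal standalone sketch of the same calculation (illustrative only, not code from this patch; the helper name index_to_coords is made up) reproduces the documented example, index 4 of a[3][2] giving (2, 0):

    #include <stdio.h>

    /* Row-major index -> coordinates for an array whose dimension
     * lengths are gdimlen[0..ndims-1]. Mirrors the behavior documented
     * for idx_to_dim_list(): index 4 of a[3][2] gives (2, 0). */
    static void index_to_coords(int ndims, const int *gdimlen, long idx, long *coord)
    {
        for (int i = ndims - 1; i >= 0; --i)
        {
            long next = idx / gdimlen[i];        /* peel off this dimension */
            coord[i] = idx - next * gdimlen[i];  /* remainder is the coordinate */
            idx = next;
        }
    }

    int main(void)
    {
        int gdimlen[2] = {3, 2};
        long coord[2];

        index_to_coords(2, gdimlen, 4, coord);
        printf("%ld,%ld\n", coord[0], coord[1]);   /* prints 2,0 */
        return 0;
    }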
* - * @param ndims the number of dimensions - * @param gdims pointer to an array of dimension ids - * @param maplen the length of the map + * This function is used when creating the subset rearranger (it + * is not used for the box rearranger). It is called by get_regions(). + * + * Preconditions: + *
+ *
+ * <ul>
+ * <li>ndims is > 0
+ * <li>maplen is > 0
+ * <li>All elements of map are inside the bounds specified by gdimlen.
+ * </ul>
+ * + * Note that the map array is 1 based, but calculations are 0 based. + * + * @param ndims the number of dimensions. + * @param gdimlen an array length ndims with the sizes of the global + * dimensions. + * @param maplen the length of the map. * @param map - * @param start array of start indicies - * @param count array of counts - * @returns 0 on success, error code otherwise. + * @param start array (length ndims) that will get start indicies of + * found region. + * @param count array (length ndims) that will get counts of found + * region. + * @returns length of the region found. */ -PIO_Offset find_region(int ndims, const int *gdims, int maplen, const PIO_Offset *map, +PIO_Offset find_region(int ndims, const int *gdimlen, int maplen, const PIO_Offset *map, PIO_Offset *start, PIO_Offset *count) { - int dim; - int max_size[ndims]; PIO_Offset regionlen = 1; - /* Preconditions (which might be useful to check/assert): - ndims is > 0 - maplen is > 0 - all elements of map are inside the bounds specified by gdims - The map array is 1 based, but calculations are 0 based */ - idx_to_dim_list(ndims, gdims, map[0] - 1, start); + /* Check inputs. */ + pioassert(ndims > 0 && gdimlen && maplen > 0 && map && start && count, + "invalid input", __FILE__, __LINE__); + LOG((2, "find_region ndims = %d maplen = %d", ndims, maplen)); + + int max_size[ndims]; + + /* Convert the index which is the first element of map into global + * data space. */ + idx_to_dim_list(ndims, gdimlen, map[0] - 1, start); - /* Can't expand beyond the array edge.*/ - for (dim = 0; dim < ndims; ++dim) - max_size[dim] = gdims[dim] - start[dim]; + /* Can't expand beyond the array edge. Set up max_size array for + * expand_region call below. */ + for (int dim = 0; dim < ndims; ++dim) + { + max_size[dim] = gdimlen[dim] - start[dim]; + LOG((3, "max_size[%d] = %d", max_size[dim])); + } /* For each dimension, figure out how far we can expand in that dimension while staying contiguous in the input array. Start with the innermost dimension (ndims-1), and it will recurse through to the outermost dimensions. */ - expand_region(ndims - 1, gdims, maplen, map, 1, 1, max_size, count); + expand_region(ndims - 1, gdimlen, maplen, map, 1, 1, max_size, count); - for (dim = 0; dim < ndims; dim++) + /* Calculate the number of data elements in this region. */ + for (int dim = 0; dim < ndims; dim++) regionlen *= count[dim]; - return(regionlen); + return regionlen; } /** * Convert a global coordinate value into a local array index. * - * @param ndims the number of dimensions - * @param lcoord pointer to an offset - * @param count array of counts - * @returns 0 on success, error code otherwise. + * @param ndims the number of dimensions. + * @param lcoord pointer to an offset. + * @param count array of counts. + * @returns the local array index. */ PIO_Offset coord_to_lindex(int ndims, const PIO_Offset *lcoord, const PIO_Offset *count) { PIO_Offset lindex = 0; PIO_Offset stride = 1; + /* Check inputs. */ + pioassert(ndims > 0 && lcoord && count, "invalid input", __FILE__, __LINE__); + for (int i = ndims - 1; i >= 0; i--) { lindex += lcoord[i] * stride; @@ -184,7 +206,10 @@ PIO_Offset coord_to_lindex(int ndims, const PIO_Offset *lcoord, const PIO_Offset } /** - * Compute the max io buffersize needed for a given variable + * Compute the max io buffer size needed for an iodesc. It is the + * combined size (in number of data elements) of all the regions of + * data stored in the buffer of this iodesc. The max size is then set + * in the iodesc. 
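coord_to_lindex() is the inverse trip: it folds a local coordinate back into a flat offset, using the local count array as the strides. A hedged, standalone sketch of the same stride accumulation (the helper name is made up, not part of the patch):

    /* Fold an ndims-dimensional local coordinate into a flat row-major
     * offset, with count[] giving the local extent of each dimension.
     * For count = {3, 2}, coordinate (2, 0) folds back to offset 4,
     * completing the round trip of the idx_to_dim_list() example. */
    static long coords_to_index(int ndims, const long *lcoord, const long *count)
    {
        long lindex = 0;
        long stride = 1;

        for (int i = ndims - 1; i >= 0; i--)
        {
            lindex += lcoord[i] * stride;  /* this dimension's contribution */
            stride *= count[i];            /* stride grows by the extent just consumed */
        }
        return lindex;
    }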
* * @param io_comm the IO communicator * @param iodesc a pointer to the io_desc_t struct. @@ -192,38 +217,34 @@ PIO_Offset coord_to_lindex(int ndims, const PIO_Offset *lcoord, const PIO_Offset */ int compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc) { - PIO_Offset iosize, totiosize; - int i; - io_region *region; + PIO_Offset totiosize = 0; int mpierr; /* Return code from MPI calls. */ - assert(iodesc); + pioassert(iodesc, "need iodesc", __FILE__, __LINE__); /* compute the max io buffer size, for conveneance it is the * combined size of all regions */ - totiosize = 0; - region = iodesc->firstregion; - while(region) + for (io_region *region = iodesc->firstregion; region; region = region->next) { if (region->count[0] > 0) { - iosize = 1; - for (i = 0; i < iodesc->ndims; i++) + PIO_Offset iosize = 1; + for (int i = 0; i < iodesc->ndims; i++) iosize *= region->count[i]; totiosize += iosize; } - region = region->next; } + LOG((2, "compute_maxIObuffersize got totiosize = %lld", totiosize)); /* Share the max io buffer size with all io tasks. */ if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &totiosize, 1, MPI_OFFSET, MPI_MAX, io_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); + pioassert(totiosize > 0, "totiosize <= 0", __FILE__, __LINE__); + LOG((2, "after allreduce compute_maxIObuffersize got totiosize = %lld", totiosize)); + /* Remember the result. */ iodesc->maxiobuflen = totiosize; - if (iodesc->maxiobuflen <= 0) - return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); - return PIO_NOERR; } @@ -231,41 +252,47 @@ int compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc) * Create the derived MPI datatypes used for comp2io and io2comp * transfers. Used in define_iodesc_datatypes(). * - * @param basetype The type of data (int,real,double). - * @param msgcnt The number of MPI messages/tasks to use. - * @param dlen The length of the data array. - * @param mindex An array of indexes into the data array from the comp - * map - * @param mcount The number of indexes to be put on each mpi - * message/task + * @param basetype The MPI type of data (MPI_INT, etc.). + * @param msgcnt This is the number of MPI types that are created. + * @param mindex An array (length numinds) of indexes into the data + * array from the comp map. Will be NULL when count is zero. + * @param mcount An array (length msgcnt) with the number of indexes + * to be put on each mpi message/task. * @param mfrom A pointer to the previous structure in the read/write - * list - * @param mtype The final data structure sent through MPI to the - * read/write + * list. This is always NULL for the BOX rearranger. + * @param mtype pointer to an array (length msgcnt) which gets the + * created datatypes. Will be NULL when iodesc->nrecvs == 0. * @returns 0 on success, error code otherwise. */ -int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, PIO_Offset dlen, +int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, const PIO_Offset *mindex, const int *mcount, int *mfrom, MPI_Datatype *mtype) { - PIO_Offset bsizeT[msgcnt]; int blocksize; int numinds = 0; PIO_Offset *lindex = NULL; int mpierr; /* Return code from MPI functions. */ + /* Check inputs. 
*/ + pioassert(msgcnt > 0 && mcount, "invalid input", __FILE__, __LINE__); + + PIO_Offset bsizeT[msgcnt]; + + LOG((1, "create_mpi_datatypes basetype = %d msgcnt = %d", basetype, msgcnt)); + LOG((2, "MPI_BYTE = %d MPI_CHAR = %d MPI_SHORT = %d MPI_INT = %d MPI_DOUBLE = %d", + MPI_BYTE, MPI_CHAR, MPI_SHORT, MPI_INT, MPI_DOUBLE)); + + /* How many indicies in the array? */ for (int j = 0; j < msgcnt; j++) numinds += mcount[j]; - - pioassert(dlen >= 0, "dlen < 0", __FILE__, __LINE__); - pioassert(numinds >= 0, "num inds < 0", __FILE__, __LINE__); + LOG((2, "numinds = %d", numinds)); if (mindex) { - /* memcpy(lindex, mindex, (size_t) (dlen*sizeof(PIO_Offset)));*/ if (!(lindex = malloc(numinds * sizeof(PIO_Offset)))) - return PIO_ENOMEM; + return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); memcpy(lindex, mindex, (size_t)(numinds * sizeof(PIO_Offset))); + LOG((3, "allocated lindex, copied mindex")); } bsizeT[0] = 0; @@ -273,82 +300,100 @@ int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, PIO_Offset dlen, int pos = 0; int ii = 0; - /* If there are no messages don't need to create any datatypes. */ - if (msgcnt > 0) + if (mfrom == NULL) { - if (mfrom == NULL) + LOG((3, "mfrom is NULL")); + for (int i = 0; i < msgcnt; i++) { - for (int i = 0; i < msgcnt; i++) + if (mcount[i] > 0) { - if (mcount[i] > 0) - { - bsizeT[ii] = GCDblocksize(mcount[i], lindex+pos); - ii++; - pos += mcount[i]; - } + /* Look for the largest block of data for io which + * can be expressed in terms of start and + * count. */ + bsizeT[ii] = GCDblocksize(mcount[i], lindex + pos); + ii++; + pos += mcount[i]; } - blocksize = (int)lgcd_array(ii ,bsizeT); - } - else - { - blocksize=1; } + blocksize = (int)lgcd_array(ii, bsizeT); + } + else + { + blocksize = 1; + } + LOG((3, "blocksize = %d", blocksize)); - /* pos is an index to the start of each message block. */ - pos = 0; - for (int i = 0; i < msgcnt; i++) + /* pos is an index to the start of each message block. */ + pos = 0; + for (int i = 0; i < msgcnt; i++) + { + if (mcount[i] > 0) { - if (mcount[i] > 0) + int len = mcount[i] / blocksize; + int displace[len]; + if (blocksize == 1) { - int len = mcount[i] / blocksize; - int displace[len]; - if (blocksize == 1) + if (!mfrom) { - if (!mfrom) - { - for (int j = 0; j < len; j++) - displace[j] = (int)(lindex[pos + j]); - } - else - { - int k = 0; - for (int j = 0; j < numinds; j++) - if (mfrom[j] == i) - displace[k++] = (int)(lindex[j]); - } - + for (int j = 0; j < len; j++) + displace[j] = (int)(lindex[pos + j]); } else { - for (int j = 0; j < mcount[i]; j++) - (lindex + pos)[j]++; - - for (int j = 0; j < len; j++) - displace[j] = ((lindex + pos)[j * blocksize] - 1); + int k = 0; + for (int j = 0; j < numinds; j++) + if (mfrom[j] == i) + displace[k++] = (int)(lindex[j]); } - if ((mpierr = MPI_Type_create_indexed_block(len, blocksize, displace, basetype, mtype + i))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - - if (mtype[i] == PIO_DATATYPE_NULL) - return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); + } + else + { + for (int j = 0; j < mcount[i]; j++) + (lindex + pos)[j]++; - /* Commit the MPI data type. 
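The GCDblocksize()/lgcd_array() step looks for the largest block length that evenly describes every contiguous run in an index list. A rough standalone sketch of that idea (simplified; the library's actual GCDblocksize() may differ in detail, and n is assumed positive):

    /* Greatest common divisor, then the GCD of the lengths of all
     * contiguous runs in a sorted index list. The result is the
     * largest block size with which the whole list can be described
     * as equally sized blocks. */
    static long gcd_long(long a, long b)
    {
        while (b)
        {
            long t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    static long gcd_blocksize(int n, const long *idx)
    {
        long result = 0;   /* gcd(0, x) == x */
        long run = 1;

        for (int i = 1; i <= n; i++)
        {
            if (i < n && idx[i] == idx[i - 1] + 1)
                run++;                           /* still contiguous */
            else
            {
                result = gcd_long(result, run);  /* close out this run */
                run = 1;
            }
        }
        return result;   /* e.g. {0, 1, 4, 5} -> 2; {3, 4, 5, 9} -> 1 */
    }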
*/ - if ((mpierr = MPI_Type_commit(mtype + i))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - pos += mcount[i]; + for (int j = 0; j < len; j++) + displace[j] = ((lindex + pos)[j * blocksize] - 1); } + +#if PIO_ENABLE_LOGGING + for (int j = 0; j < len; j++) + LOG((3, "displace[%d] = %d", j, displace[j])); +#endif /* PIO_ENABLE_LOGGING */ + + LOG((3, "calling MPI_Type_create_indexed_block len = %d blocksize = %d " + "basetype = %d", len, blocksize, basetype)); + /* Create an indexed datatype with constant-sized blocks. */ + if ((mpierr = MPI_Type_create_indexed_block(len, blocksize, displace, + basetype, &mtype[i]))) + return check_mpi(NULL, mpierr, __FILE__, __LINE__); + + if (mtype[i] == PIO_DATATYPE_NULL) + return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); + + /* Commit the MPI data type. */ + LOG((3, "about to commit type")); + if ((mpierr = MPI_Type_commit(&mtype[i]))) + return check_mpi(NULL, mpierr, __FILE__, __LINE__); + pos += mcount[i]; } - if (lindex) - free(lindex); } + /* Free resources. */ + if (lindex) + free(lindex); + + LOG((3, "done with create_mpi_datatypes()")); return PIO_NOERR; } /** - * Create the derived MPI datatypes used for comp2io and io2comp - * transfers. + * If needed, create the derived MPI datatypes used for comp2io and + * io2comp transfers. + * + * If iodesc->stype and iodesc->rtype arrays already exist, this + * function does nothing. This function is called from + * rearrange_io2comp() and rearrange_comp2io(). * * NOTE from Jim: I am always oriented toward write so recieve * always means io tasks and send always means comp tasks. The @@ -366,142 +411,140 @@ int define_iodesc_datatypes(iosystem_desc_t *ios, io_desc_t *iodesc) { int ret; /* Return value. */ + pioassert(ios && iodesc, "invalid input", __FILE__, __LINE__); + LOG((1, "define_iodesc_datatypes ios->ioproc = %d", ios->ioproc)); + /* Set up the to transfer data to and from the IO tasks. */ if (ios->ioproc) { + /* If the types for the IO tasks have not been created, then + * create them. */ if (!iodesc->rtype) { if (iodesc->nrecvs > 0) { - /* Allocate memory for array of data. */ + /* Allocate memory for array of MPI types for the IO tasks. */ if (!(iodesc->rtype = malloc(iodesc->nrecvs * sizeof(MPI_Datatype)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + LOG((2, "allocated memory for IO task MPI types iodesc->nrecvs = %d " + "iodesc->rearranger = %d", iodesc->nrecvs, iodesc->rearranger)); /* Initialize data types to NULL. */ for (int i = 0; i < iodesc->nrecvs; i++) iodesc->rtype[i] = PIO_DATATYPE_NULL; - /* Create the datatypes, which will be used both to - * receive and to send data. */ - if (iodesc->rearranger == PIO_REARR_SUBSET) - { - if ((ret = create_mpi_datatypes(iodesc->basetype, iodesc->nrecvs, iodesc->llen, - iodesc->rindex, iodesc->rcount, iodesc->rfrom, - iodesc->rtype))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - } - else - { - if ((ret = create_mpi_datatypes(iodesc->basetype, iodesc->nrecvs, iodesc->llen, - iodesc->rindex, iodesc->rcount, NULL, iodesc->rtype))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - } + /* The different rearrangers get different values for mfrom. */ + int *mfrom = iodesc->rearranger == PIO_REARR_SUBSET ? iodesc->rfrom : NULL; + + /* Create the MPI datatypes. */ + if ((ret = create_mpi_datatypes(iodesc->basetype, iodesc->nrecvs, iodesc->rindex, + iodesc->rcount, mfrom, iodesc->rtype))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); } } } - /* Define the send datatypes if they don't exist. 
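At the center of create_mpi_datatypes() is MPI_Type_create_indexed_block(), which presents scattered, equally sized blocks of a buffer as a single datatype. A minimal illustration of that one call (standalone sketch, not this patch's code):

    #include <mpi.h>

    /* Describe elements 0, 3 and 7 of a double buffer as one MPI
     * datatype built from three blocks of length 1, then commit it.
     * create_mpi_datatypes() builds its send/receive types the same
     * way, with the displacements taken from the rearranger maps. */
    static int build_example_type(MPI_Datatype *type)
    {
        int displacements[3] = {0, 3, 7};
        int mpierr;

        if ((mpierr = MPI_Type_create_indexed_block(3, 1, displacements,
                                                    MPI_DOUBLE, type)))
            return mpierr;
        return MPI_Type_commit(type);
    }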
*/ + /* Define the datatypes for the computation components if they + * don't exist. (These will be the send side in a write + * operation.) */ if (!iodesc->stype) { int ntypes; - int ncnt; - if (iodesc->rearranger == PIO_REARR_SUBSET) - { - /* Subset rearranger gets one type. */ - ntypes = 1; - ncnt = iodesc->scount[0]; - } - else - { - /* Box rearranger gets one type per IO task. */ - ntypes = ios->num_iotasks; - ncnt = iodesc->ndof; - } + /* Subset rearranger gets one type; box rearranger gets one + * type per IO task. */ + ntypes = iodesc->rearranger == PIO_REARR_SUBSET ? 1 : ios->num_iotasks; - /* Allocate memory for array of send types. */ + /* Allocate memory for array of MPI types for the computation tasks. */ if (!(iodesc->stype = malloc(ntypes * sizeof(MPI_Datatype)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + LOG((3, "allocated memory for computation MPI types ntypes = %d", ntypes)); /* Initialize send types to NULL. */ for (int i = 0; i < ntypes; i++) iodesc->stype[i] = PIO_DATATYPE_NULL; + /* Remember how many types we created for the send side. */ iodesc->num_stypes = ntypes; /* Create the MPI data types. */ - if ((ret = create_mpi_datatypes(iodesc->basetype, ntypes, ncnt, iodesc->sindex, + LOG((3, "about to call create_mpi_datatypes for computation MPI types")); + if ((ret = create_mpi_datatypes(iodesc->basetype, ntypes, iodesc->sindex, iodesc->scount, NULL, iodesc->stype))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); } + LOG((3, "done with define_iodesc_datatypes()")); return PIO_NOERR; } /** - * Completes the mapping for the box rearranger. + * Completes the mapping for the box rearranger. This function is + * called from box_rearrange_create(). It is not used for the subset + * rearranger. + * + * This function: + *
+ *
+ * <ul>
+ * <li>Allocates and inits iodesc->scount, an array (length
+ * ios->num_iotasks) containing the number of data elements sent to
+ * each IO task from the current compute task.
+ * <li>Uses pio_swapm() to send the iodesc->scount array from each
+ * computation task to all IO tasks.
+ * <li>On IO tasks, allocates and inits the iodesc->rcount and
+ * iodesc->rfrom arrays (length max(1, nrecvs)), which hold the
+ * amount of data to expect from each compute task and the rank of
+ * that task.
+ * <li>Allocates and inits the iodesc->sindex array (length
+ * iodesc->ndof), which holds indices for the computation tasks.
+ * <li>On IO tasks, allocates and inits iodesc->rindex (length
+ * totalrecv) with indices of the data to be sent/received from this
+ * IO task to each compute task.
+ * <li>Uses pio_swapm() to send the list of indices on each compute
+ * task to the IO tasks.
+ * </ul>
* - * @param ios pointer to the iosystem_desc_t struct + * @param ios pointer to the iosystem_desc_t struct. * @param iodesc a pointer to the io_desc_t struct. - * @param maplen the length of the map - * @param dest_ioproc an array of IO task numbers - * @param dest_ioindex - * @param mycomm an MPI communicator + * @param dest_ioproc an array (length maplen) of IO task numbers. + * @param dest_ioindex an array (length maplen) of IO indicies. * @returns 0 on success, error code otherwise. */ -int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, int maplen, - const int *dest_ioproc, const PIO_Offset *dest_ioindex, MPI_Comm mycomm) +int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, + const int *dest_ioproc, const PIO_Offset *dest_ioindex) { - int i; - int iorank; - int rank; - int ntasks; - int mpierr; /* Return call from MPI functions. */ - - /* Find size of communicator, and task rank. */ - if ((mpierr = MPI_Comm_rank(mycomm, &rank))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Comm_size(mycomm, &ntasks))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - - MPI_Datatype sr_types[ntasks]; - int send_counts[ntasks]; - int send_displs[ntasks]; - int recv_counts[ntasks]; - int recv_displs[ntasks]; int *recv_buf = NULL; - int nrecvs; + int nrecvs = 0; int ierr; - int io_comprank; - int ioindex; - int tsize; - int numiotasks; - PIO_Offset s2rindex[iodesc->ndof]; - pioassert(iodesc, "iodesc must be provided", __FILE__, __LINE__); + /* Check inputs. */ + pioassert(ios && iodesc && dest_ioproc && dest_ioindex && + iodesc->rearranger == PIO_REARR_BOX && ios->num_uniontasks > 0, + "invalid input", __FILE__, __LINE__); + LOG((1, "compute_counts ios->num_uniontasks = %d", ios->num_uniontasks)); - /* Subset rearranger always gets 1 IO task. */ - if (iodesc->rearranger == PIO_REARR_BOX) - numiotasks = ios->num_iotasks; - else - numiotasks = 1; + /* Arrays for swapm all to all gather calls. */ + MPI_Datatype sr_types[ios->num_uniontasks]; + int send_counts[ios->num_uniontasks]; + int send_displs[ios->num_uniontasks]; + int recv_counts[ios->num_uniontasks]; + int recv_displs[ios->num_uniontasks]; - /* Allocate memory for the array of counts. */ - if (!(iodesc->scount = malloc(numiotasks * sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + /* The list of indeces on each compute task */ + PIO_Offset s2rindex[iodesc->ndof]; - /* Initialize counts to zero. */ - for (i = 0; i < numiotasks; i++) - iodesc->scount[i] = 0; + /* Allocate memory for the array of counts and init to zero. */ + if (!(iodesc->scount = calloc(ios->num_iotasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - /* iodesc->scount is the amount of data sent to each task from the - * current task */ - for (i = 0; i < maplen; i++) + /* iodesc->scount is the number of data elements sent to each IO + * task from the current compute task. dest_ioindex[i] may be + * -1. */ + for (int i = 0; i < iodesc->ndof; i++) if (dest_ioindex[i] >= 0) (iodesc->scount[dest_ioproc[i]])++; - /* Initialize arrays. */ - for (i = 0; i < ntasks; i++) + /* Initialize arrays used in swapm call. */ + for (int i = 0; i < ios->num_uniontasks; i++) { send_counts[i] = 0; send_displs[i] = 0; @@ -510,66 +553,74 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, int maplen, sr_types[i] = MPI_INT; } - /* ??? */ - for (i = 0; i < numiotasks; i++) + /* Setup for the swapm call. 
iodesc->scount is the amount of data + * this compute task will transfer to/from each iotask. For the + * box rearranger there can be more than one IO task per compute + * task. This provides enough information to know the size of data + * on the iotask, so at line 557 we allocate arrays to hold the + * map on the iotasks. iodesc->rcount is an array of the amount of + * data to expect from each compute task and iodesc->rfrom is the + * rank of that task. */ + if (ios->compproc) { - int io_comprank; - if (iodesc->rearranger == PIO_REARR_SUBSET) - io_comprank = 0; - else - io_comprank = ios->ioranks[i]; - send_counts[io_comprank] = 1; - send_displs[io_comprank] = i * sizeof(int); + for (int i = 0; i < ios->num_iotasks; i++) + { + send_counts[ios->ioranks[i]] = 1; + send_displs[ios->ioranks[i]] = i * sizeof(int); + LOG((3, "send_counts[%d] = %d send_displs[%d] = %d", ios->ioranks[i], + send_counts[ios->ioranks[i]], ios->ioranks[i], send_displs[ios->ioranks[i]])); + } } - - /* ??? */ + + /* IO tasks need to know how many data elements they will receive + * from each compute task. Allocate space for that, and set up + * swapm call. */ if (ios->ioproc) { - /* Allocate memory to hold array of tasks that have recieved - * data ??? */ - if (!(recv_buf = calloc(ntasks, sizeof(int)))) + /* Allocate memory to hold array of the scounts from all + * computation tasks. */ + if (!(recv_buf = calloc(ios->num_comptasks, sizeof(int)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - /* Initialize arrays that keep track of receives. */ - for (i = 0; i < ntasks; i++) + /* Initialize arrays that keep track of ???. */ + for (int i = 0; i < ios->num_comptasks; i++) { - recv_counts[i] = 1; - recv_displs[i] = i * sizeof(int); + recv_counts[ios->compranks[i]] = 1; + recv_displs[ios->compranks[i]] = i * sizeof(int); } } - /* Share the iodesc->scount from each compute task to all io tasks. */ - ierr = pio_swapm(iodesc->scount, send_counts, send_displs, sr_types, - recv_buf, recv_counts, recv_displs, sr_types, - mycomm, - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_hs, - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_isend, - iodesc->rearr_opts.comm_fc_opts_comp2io.max_pend_req); + LOG((2, "about to share scount from each compute task to all IO tasks.")); + /* Share the iodesc->scount from each compute task to all IO + * tasks. The scounts will end up in array recv_buf. */ + if ((ierr = pio_swapm(iodesc->scount, send_counts, send_displs, sr_types, + recv_buf, recv_counts, recv_displs, sr_types, ios->union_comm, + &iodesc->rearr_opts.comp2io))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); - /* ??? */ - nrecvs = 0; + /* On IO tasks, set up data receives. */ if (ios->ioproc) { - for (i = 0; i < ntasks; i++) + /* Count the number of non-zero scounts from the compute + * tasks. */ + for (int i = 0; i < ios->num_comptasks; i++) + { if (recv_buf[i] != 0) nrecvs++; + LOG((3, "recv_buf[%d] = %d", i, recv_buf[i])); + } /* Get memory to hold the count of data receives. */ - if (!(iodesc->rcount = malloc(max(1, nrecvs) * sizeof(int)))) + if (!(iodesc->rcount = calloc(max(1, nrecvs), sizeof(int)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); /* Get memory to hold the list of task data was from. 
*/ - if (!(iodesc->rfrom = malloc(max(1, nrecvs) * sizeof(int)))) + if (!(iodesc->rfrom = calloc(max(1, nrecvs), sizeof(int)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - for (i = 0; i < max(1, nrecvs); i++) - { - iodesc->rcount[i] = 0; - iodesc->rfrom[i] = 0; - } + LOG((3, "allocared rfrom max(1, nrecvs) = %d", max(1, nrecvs))); nrecvs = 0; - for (i = 0; i < ntasks; i++) + for (int i = 0; i < ios->num_comptasks; i++) { if (recv_buf[i] != 0) { @@ -583,39 +634,50 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, int maplen, /* ??? */ iodesc->nrecvs = nrecvs; + LOG((3, "iodesc->nrecvs = %d", iodesc->nrecvs)); + + /* Allocate an array for indicies on the computation tasks (the + * send side when writing). */ if (iodesc->sindex == NULL && iodesc->ndof > 0) if (!(iodesc->sindex = malloc(iodesc->ndof * sizeof(PIO_Offset)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + LOG((2, "iodesc->ndof = %d", iodesc->ndof)); - int tempcount[numiotasks]; - int spos[numiotasks]; + int tempcount[ios->num_iotasks]; + int spos[ios->num_iotasks]; /* ??? */ spos[0] = 0; tempcount[0] = 0; - for (i = 1; i < numiotasks; i++) + for (int i = 1; i < ios->num_iotasks; i++) { spos[i] = spos[i - 1] + iodesc->scount[i - 1]; tempcount[i] = 0; + LOG((3, "spos[%d] = %d tempcount[%d] = %d", i, spos[i], i, tempcount[i])); } - for (i = 0; i < maplen; i++) + /* ??? */ + for (int i = 0; i < iodesc->ndof; i++) { + int iorank; + int ioindex; + iorank = dest_ioproc[i]; ioindex = dest_ioindex[i]; if (iorank > -1) { /* this should be moved to create_box */ - if (iodesc->rearranger == PIO_REARR_BOX) - iodesc->sindex[spos[iorank] + tempcount[iorank]] = i; + iodesc->sindex[spos[iorank] + tempcount[iorank]] = i; s2rindex[spos[iorank] + tempcount[iorank]] = ioindex; (tempcount[iorank])++; + LOG((3, "iorank = %d ioindex = %d tempcount[iorank] = %d", iorank, ioindex, + tempcount[iorank])); } } /* Initialize arrays to zeros. */ - for (i = 0; i < ntasks; i++) + for (int i = 0; i < ios->num_uniontasks; i++) { send_counts[i] = 0; send_displs[i] = 0; @@ -623,97 +685,83 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, int maplen, recv_displs[i] = 0; } - /* Find the size of the offset type. */ - if ((mpierr = MPI_Type_size(MPI_OFFSET, &tsize))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - - for (i = 0; i < ntasks; i++) - sr_types[i] = MPI_OFFSET; - - for (i = 0; i < numiotasks; i++) + /* ??? */ + for (int i = 0; i < ios->num_iotasks; i++) { - if (iodesc->rearranger == PIO_REARR_BOX) - io_comprank = ios->ioranks[i]; - else - io_comprank = 0; - - send_counts[io_comprank] = iodesc->scount[i]; - if (send_counts[io_comprank] > 0) - send_displs[io_comprank] = spos[i] * tsize; + /* Subset rearranger needs one type, box rearranger needs one for + * each IO task. */ + send_counts[ios->ioranks[i]] = iodesc->scount[i]; + if (send_counts[ios->ioranks[i]] > 0) + send_displs[ios->ioranks[i]] = spos[i] * SIZEOF_MPI_OFFSET; + LOG((3, "ios->ioranks[i] = %d iodesc->scount[%d] = %d spos[%d] = %d", + ios->ioranks[i], i, iodesc->scount[i], i, spos[i])); } + /* Only do this on IO tasks. 
*/ if (ios->ioproc) { int totalrecv = 0; - for (i = 0; i < nrecvs; i++) + for (int i = 0; i < nrecvs; i++) { recv_counts[iodesc->rfrom[i]] = iodesc->rcount[i]; - totalrecv+=iodesc->rcount[i]; + totalrecv += iodesc->rcount[i]; } - recv_displs[0] = 0; - for (i = 1; i < nrecvs; i++) - recv_displs[iodesc->rfrom[i]] = recv_displs[iodesc->rfrom[i-1]] + iodesc->rcount[i-1] * tsize; + recv_displs[0] = 0; + for (int i = 1; i < nrecvs; i++) + { + recv_displs[iodesc->rfrom[i]] = recv_displs[iodesc->rfrom[i - 1]] + + iodesc->rcount[i - 1] * SIZEOF_MPI_OFFSET; + LOG((3, "iodesc->rfrom[%d] = %d recv_displs[iodesc->rfrom[i]] = %d", i, + iodesc->rfrom[i], recv_displs[iodesc->rfrom[i]])); + } + /* rindex is an array of the indices of the data to be sent from + this io task to each compute task. */ + LOG((3, "totalrecv = %d", totalrecv)); if (totalrecv > 0) { totalrecv = iodesc->llen; /* can reduce memory usage here */ if (!(iodesc->rindex = calloc(totalrecv, sizeof(PIO_Offset)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + LOG((3, "allocated totalrecv elements in rindex array")); } } + /* For the swapm call below, init the types to MPI_OFFSET. */ + for (int i = 0; i < ios->num_uniontasks; i++) + sr_types[i] = MPI_OFFSET; + + /* Here we are sending the mapping from the index on the compute + * task to the index on the io task. */ /* s2rindex is the list of indeces on each compute task */ - /* - printf("%d s2rindex: ", ios->comp_rank); - for (i = 0;indof;i++) - printf("%ld ",s2rindex[i]); - printf("\n"); - */ - ierr = pio_swapm(s2rindex, send_counts, send_displs, sr_types, iodesc->rindex, - recv_counts, recv_displs, sr_types, mycomm, - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_hs, - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_isend, - iodesc->rearr_opts.comm_fc_opts_comp2io.max_pend_req); - - /* rindex is an array of the indices of the data to be sent from - this io task to each compute task. */ - /* - if (ios->ioproc){ - printf("%d rindex: ",ios->io_rank); - for (int j = 0;jllen;j++) - printf(" %ld ",iodesc->rindex[j]); - printf("\n"); - } - */ - return ierr; + LOG((3, "sending mapping")); + if ((ierr = pio_swapm(s2rindex, send_counts, send_displs, sr_types, iodesc->rindex, + recv_counts, recv_displs, sr_types, ios->union_comm, + &iodesc->rearr_opts.comp2io))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + + return PIO_NOERR; } /** - * Moves data from compute tasks to IO tasks. + * Moves data from compute tasks to IO tasks. This is called from + * PIOc_write_darray_multi(). * - * @param ios pointer to the iosystem_desc_t struct + * @param ios pointer to the iosystem_desc_t struct. * @param iodesc a pointer to the io_desc_t struct. - * @param sbuf send buffer. - * @param rbuf receive buffer. + * @param sbuf send buffer. May be NULL. + * @param rbuf receive buffer. May be NULL. * @param nvars number of variables. * @returns 0 on success, error code otherwise. */ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, void *rbuf, int nvars) { - int ntasks; - int niotasks; - int *scount = iodesc->scount; - int i, tsize; - int *sendcounts; - int *recvcounts; - int *sdispls; - int *rdispls; - MPI_Datatype *sendtypes; - MPI_Datatype *recvtypes; - MPI_Comm mycomm; - int mpierr; /* Return code from MPI calls. */ + int ntasks; /* Number of tasks in communicator. */ + int niotasks; /* Number of IO tasks. */ + MPI_Comm mycomm; /* Communicator that data is transferred over. */ + int mpierr; /* Return code from MPI calls. 
*/ int ret; #ifdef TIMING @@ -721,11 +769,12 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, #endif /* Caller must provide these. */ - pioassert(iodesc && nvars > 0, "invalid input", __FILE__, __LINE__); + pioassert(ios && iodesc && nvars > 0, "invalid input", __FILE__, __LINE__); - LOG((2, "rearrange_comp2io nvars = %d iodesc->rearranger = %d", nvars, + LOG((1, "rearrange_comp2io nvars = %d iodesc->rearranger = %d", nvars, iodesc->rearranger)); + /* Different rearraangers use different communicators. */ if (iodesc->rearranger == PIO_REARR_BOX) { mycomm = ios->union_comm; @@ -741,83 +790,89 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, if ((mpierr = MPI_Comm_size(mycomm, &ntasks))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); - /* Get the size of the MPI type. */ - if ((mpierr = MPI_Type_size(iodesc->basetype, &tsize))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - LOG((3, "ntasks = %d tsize = %d", ntasks, tsize)); - - /* Define the MPI data types that will be used for this - * io_desc_t. */ - if ((ret = define_iodesc_datatypes(ios, iodesc))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - - /* Allocate arrays needed by the pio_swapm() function. */ - if (!(sendcounts = calloc(ntasks, sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(recvcounts = calloc(ntasks, sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(sdispls = calloc(ntasks, sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(rdispls = calloc(ntasks, sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(sendtypes = malloc(ntasks * sizeof(MPI_Datatype)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(recvtypes = malloc(ntasks * sizeof(MPI_Datatype)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + /* These are parameters to pio_swapm to send data from compute to + * IO tasks. */ + int sendcounts[ntasks]; + int recvcounts[ntasks]; + int sdispls[ntasks]; + int rdispls[ntasks]; + MPI_Datatype sendtypes[ntasks]; + MPI_Datatype recvtypes[ntasks]; - /* Initialize arrays. */ - for (i = 0; i < ntasks; i++) + /* Initialize pio_swapm parameter arrays. */ + for (int i = 0; i < ntasks; i++) { + sendcounts[i] = 0; + recvcounts[i] = 0; + sdispls[i] = 0; + rdispls[i] = 0; recvtypes[i] = PIO_DATATYPE_NULL; sendtypes[i] = PIO_DATATYPE_NULL; } + LOG((3, "ntasks = %d iodesc->basetype_size = %d niotasks = %d", ntasks, + iodesc->basetype_size, niotasks)); + + /* If it has not already been done, define the MPI data types that + * will be used for this io_desc_t. */ + if ((ret = define_iodesc_datatypes(ios, iodesc))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); /* If this io proc will exchange data with compute tasks create a * MPI DataType for that exchange. 
*/ + LOG((2, "ios->ioproc %d iodesc->nrecvs = %d", ios->ioproc, iodesc->nrecvs)); if (ios->ioproc && iodesc->nrecvs > 0) { - for (i = 0; i < iodesc->nrecvs; i++) + for (int i = 0; i < iodesc->nrecvs; i++) { if (iodesc->rtype[i] != PIO_DATATYPE_NULL) { + LOG((3, "iodesc->rtype[%d] = %d iodesc->rearranger = %d", i, iodesc->rtype[i], + iodesc->rearranger)); if (iodesc->rearranger == PIO_REARR_SUBSET) { - recvcounts[ i ] = 1; - - /* The stride here is the length of the collected array (llen) */ - - if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint) iodesc->llen * tsize, iodesc->rtype[i], - recvtypes + i))) + LOG((3, "exchanging data for subset rearranger")); + recvcounts[i] = 1; + + /* Create an MPI derived data type from equally + * spaced blocks of the same size. The block size + * is 1, the stride here is the length of the + * collected array (llen). */ +#if PIO_USE_MPISERIAL + if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->basetype_size, + iodesc->rtype[i], &recvtypes[i]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); - - if (recvtypes[i] == PIO_DATATYPE_NULL) - return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); - - if ((mpierr = MPI_Type_commit(recvtypes + i))) +#else + if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->basetype_size, + iodesc->rtype[i], &recvtypes[i]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); +#endif /* PIO_USE_MPISERIAL */ + pioassert(recvtypes[i] != PIO_DATATYPE_NULL, "bad mpi type", __FILE__, __LINE__); - /*recvtypes[ i ] = iodesc->rtype[i]; */ + if ((mpierr = MPI_Type_commit(&recvtypes[i]))) + return check_mpi(NULL, mpierr, __FILE__, __LINE__); } else { + LOG((3, "exchanging data for box rearranger")); + LOG((3, "i = %d iodesc->rfrom[i] = %d recvcounts[iodesc->rfrom[i]] = %d", i, + iodesc->rfrom[i], recvcounts[iodesc->rfrom[i]])); recvcounts[iodesc->rfrom[i]] = 1; - if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->llen * tsize, iodesc->rtype[i], - recvtypes + iodesc->rfrom[i]))) +#if PIO_USE_MPISERIAL + if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->basetype_size, + iodesc->rtype[i], &recvtypes[iodesc->rfrom[i]]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); +#else + if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->basetype_size, + iodesc->rtype[i], &recvtypes[iodesc->rfrom[i]]))) + return check_mpi(NULL, mpierr, __FILE__, __LINE__); +#endif /* PIO_USE_MPISERIAL */ + pioassert(recvtypes[iodesc->rfrom[i]] != PIO_DATATYPE_NULL, "bad mpi type", + __FILE__, __LINE__); - if (recvtypes[iodesc->rfrom[i]] == PIO_DATATYPE_NULL) - return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); - - if ((mpierr = MPI_Type_commit(recvtypes+iodesc->rfrom[i]))) + if ((mpierr = MPI_Type_commit(&recvtypes[iodesc->rfrom[i]]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); - /* recvtypes[ iodesc->rfrom[i] ] = iodesc->rtype[i]; */ rdispls[iodesc->rfrom[i]] = 0; } } @@ -826,57 +881,57 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, /* On compute tasks loop over iotasks and create a data type for * each exchange. 
*/ - for (i = 0; i < niotasks; i++) + for (int i = 0; i < niotasks; i++) { int io_comprank = ios->ioranks[i]; if (iodesc->rearranger == PIO_REARR_SUBSET) - io_comprank=0; + io_comprank = 0; - if (scount[i] > 0 && sbuf != NULL) + LOG((3, "i = %d iodesc->scount[i] = %d", i, iodesc->scount[i])); + if (iodesc->scount[i] > 0 && sbuf) { + LOG((3, "io task %d creating sendtypes[%d]", i, io_comprank)); sendcounts[io_comprank] = 1; - if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->ndof * tsize, iodesc->stype[i], - sendtypes + io_comprank))) +#if PIO_USE_MPISERIAL + if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->ndof * iodesc->basetype_size, + iodesc->stype[i], &sendtypes[io_comprank]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); +#else + if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->ndof * iodesc->basetype_size, + iodesc->stype[i], &sendtypes[io_comprank]))) + return check_mpi(NULL, mpierr, __FILE__, __LINE__); +#endif /* PIO_USE_MPISERIAL */ + pioassert(sendtypes[io_comprank] != PIO_DATATYPE_NULL, "bad mpi type", __FILE__, __LINE__); - if (sendtypes[io_comprank] == PIO_DATATYPE_NULL) - return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); - - if ((mpierr = MPI_Type_commit(sendtypes + io_comprank))) + if ((mpierr = MPI_Type_commit(&sendtypes[io_comprank]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } else { - sendcounts[io_comprank]=0; + sendcounts[io_comprank] = 0; } } + /* Data in sbuf on the compute nodes is sent to rbuf on the ionodes */ - pio_swapm(sbuf, sendcounts, sdispls, sendtypes, - rbuf, recvcounts, rdispls, recvtypes, mycomm, - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_hs, - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_isend, - iodesc->rearr_opts.comm_fc_opts_comp2io.max_pend_req); + LOG((2, "about to call pio_swapm for sbuf")); + if ((ret = pio_swapm(sbuf, sendcounts, sdispls, sendtypes, + rbuf, recvcounts, rdispls, recvtypes, mycomm, + &iodesc->rearr_opts.comp2io))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); /* Free the MPI types. */ - for (i = 0; i < ntasks; i++) + for (int i = 0; i < ntasks; i++) { + LOG((3, "freeing MPI types for task %d", i)); if (sendtypes[i] != PIO_DATATYPE_NULL) - if ((mpierr = MPI_Type_free(sendtypes + i))) + if ((mpierr = MPI_Type_free(&sendtypes[i]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); if (recvtypes[i] != PIO_DATATYPE_NULL) - if ((mpierr = MPI_Type_free(recvtypes + i))) + if ((mpierr = MPI_Type_free(&recvtypes[i]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } - /* Free memory. */ - free(sendcounts); - free(recvcounts); - free(sdispls); - free(rdispls); - free(sendtypes); - free(recvtypes); - #ifdef TIMING GPTLstop("PIO:rearrange_comp2io"); #endif @@ -885,7 +940,8 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, } /** - * Moves data from IO tasks to compute tasks. + * Moves data from IO tasks to compute tasks. This function is used in + * PIOc_read_darray(). * * @param ios pointer to the iosystem_desc_t struct. * @param iodesc a pointer to the io_desc_t struct. @@ -899,18 +955,11 @@ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, MPI_Comm mycomm; int ntasks; int niotasks; - int *scount = iodesc->scount; - int *sendcounts; - int *recvcounts; - int *sdispls; - int *rdispls; - MPI_Datatype *sendtypes; - MPI_Datatype *recvtypes; - int i; int mpierr; /* Return code from MPI calls. */ int ret; - assert(iodesc); + /* Check inputs. 
*/ + pioassert(ios && iodesc, "invalid input", __FILE__, __LINE__); #ifdef TIMING GPTLstart("PIO:rearrange_io2comp"); @@ -926,8 +975,9 @@ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, else { mycomm = iodesc->subset_comm; - niotasks=1; + niotasks = 1; } + LOG((3, "niotasks = %d", niotasks)); /* Get the size of this communicator. */ if ((mpierr = MPI_Comm_size(mycomm, &ntasks))) @@ -939,34 +989,30 @@ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, return pio_err(ios, NULL, ret, __FILE__, __LINE__); /* Allocate arrays needed by the pio_swapm() function. */ - if (!(sendcounts = calloc(ntasks, sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(recvcounts = calloc(ntasks, sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(sdispls = calloc(ntasks, sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(rdispls = calloc(ntasks, sizeof(int)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(sendtypes = malloc(ntasks * sizeof(MPI_Datatype)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - if (!(recvtypes = malloc(ntasks * sizeof(MPI_Datatype)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + int sendcounts[ntasks]; + int recvcounts[ntasks]; + int sdispls[ntasks]; + int rdispls[ntasks]; + MPI_Datatype sendtypes[ntasks]; + MPI_Datatype recvtypes[ntasks]; /* Initialize arrays. */ - for (i = 0; i < ntasks; i++) + for (int i = 0; i < ntasks; i++) { + sendcounts[i] = 0; + recvcounts[i] = 0; + sdispls[i] = 0; + rdispls[i] = 0; sendtypes[i] = PIO_DATATYPE_NULL; recvtypes[i] = PIO_DATATYPE_NULL; } - /* In IO tasks ??? */ + /* In IO tasks set up sendcounts/sendtypes for pio_swapm() call + * below. */ if (ios->ioproc) - for (i = 0; i < iodesc->nrecvs; i++) + { + for (int i = 0; i < iodesc->nrecvs; i++) + { if (iodesc->rtype[i] != PIO_DATATYPE_NULL) { if (iodesc->rearranger == PIO_REARR_SUBSET) @@ -983,18 +1029,21 @@ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, sendtypes[iodesc->rfrom[i]] = iodesc->rtype[i]; } } + } + } /* In the box rearranger each comp task may communicate with * multiple IO tasks here we are setting the count and data type * of the communication of a given compute task with each io * task. */ - for (i = 0; i < niotasks; i++) + for (int i = 0; i < niotasks; i++) { int io_comprank = ios->ioranks[i]; + if (iodesc->rearranger == PIO_REARR_SUBSET) io_comprank = 0; - if (scount[i] > 0 && iodesc->stype[i] != PIO_DATATYPE_NULL) + if (iodesc->scount[i] > 0 && iodesc->stype[i] != PIO_DATATYPE_NULL) { recvcounts[io_comprank] = 1; recvtypes[io_comprank] = iodesc->stype[i]; @@ -1002,19 +1051,9 @@ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, } /* Data in sbuf on the ionodes is sent to rbuf on the compute nodes */ - pio_swapm(sbuf, sendcounts, sdispls, sendtypes, - rbuf, recvcounts, rdispls, recvtypes, mycomm, - iodesc->rearr_opts.comm_fc_opts_io2comp.enable_hs, - iodesc->rearr_opts.comm_fc_opts_io2comp.enable_isend, - iodesc->rearr_opts.comm_fc_opts_io2comp.max_pend_req); - - /* Release memory. 
*/ - free(sendcounts); - free(recvcounts); - free(sdispls); - free(rdispls); - free(sendtypes); - free(recvtypes); + if ((ret = pio_swapm(sbuf, sendcounts, sdispls, sendtypes, rbuf, recvcounts, + rdispls, recvtypes, mycomm, &iodesc->rearr_opts.io2comp))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); #ifdef TIMING GPTLstop("PIO:rearrange_io2comp"); @@ -1024,44 +1063,53 @@ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, } /** - * Determine fill value. + * Determine whether fill values are needed. This function compares + * how much data we have to how much data is in a record (or + * non-record var). If we have enough data to completely fill the + * variable, then fill is not needed. * - * @param ios pointer to the iosystem_desc_t struct + * @param ios pointer to the iosystem_desc_t struct. * @param iodesc a pointer to the io_desc_t struct. - * @param gsize pointer to an array of sizes - * @param compmap + * @param gdimlen pointer to an array length iodesc->ndims with the + * global array sizes for one record (for record vars) or for the + * entire var (for non-record vars). + * @param compmap only used for the box communicator. * @returns 0 on success, error code otherwise. */ -int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gsize, +int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gdimlen, const PIO_Offset *compmap) { PIO_Offset totalllen = 0; PIO_Offset totalgridsize = 1; - int i; int mpierr; /* Return code from MPI calls. */ - assert(iodesc); + /* Check inputs. */ + pioassert(ios && iodesc && gdimlen && compmap, "invalid input", + __FILE__, __LINE__); - for (i = 0; i < iodesc->ndims; i++) - totalgridsize *= gsize[i]; + /* Determine size of data space. */ + for (int i = 0; i < iodesc->ndims; i++) + totalgridsize *= gdimlen[i]; + /* Determine how many values we have locally. */ if (iodesc->rearranger == PIO_REARR_SUBSET) - totalllen=iodesc->llen; + totalllen = iodesc->llen; else - for (i = 0; indof; i++) + for (int i = 0; i < iodesc->ndof; i++) if (compmap[i] > 0) totalllen++; + /* Add results accross communicator. */ + LOG((2, "determine_fill before allreduce totalllen = %d totalgridsize = %d", + totalllen, totalgridsize)); if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &totalllen, 1, PIO_OFFSET, MPI_SUM, ios->union_comm))) check_mpi(NULL, mpierr, __FILE__, __LINE__); + LOG((2, "after allreduce totalllen = %d", totalllen)); /* If the total size of the data provided to be written is < the * total data size then we need fill values. */ - if (totalllen < totalgridsize) - iodesc->needsfill = true; - else - iodesc->needsfill = false; + iodesc->needsfill = totalllen < totalgridsize; /* TURN OFF FILL for timing test iodesc->needsfill=false; */ @@ -1069,174 +1117,214 @@ int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gsize, return PIO_NOERR; } -/** - * Prints the IO desc information to stdout. - * - * @param iodesc a pointer to the io_desc_t struct. - * @returns 0 on success, error code otherwise. 
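The fill test described for determine_fill() reduces to comparing the product of the global dimension lengths with the number of elements actually supplied. Stripped of the MPI reduction, the comparison is just (illustrative sketch, hypothetical helper name):

    /* Fill values are needed when the data supplied by all tasks does
     * not cover the whole variable. total_len stands for the
     * already-summed element count (the patch obtains it with an
     * MPI_Allreduce over the union communicator). */
    static int needs_fill(int ndims, const int *gdimlen, long long total_len)
    {
        long long totalgridsize = 1;

        for (int i = 0; i < ndims; i++)
            totalgridsize *= gdimlen[i];

        return total_len < totalgridsize;
    }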
- */ -void iodesc_dump(io_desc_t *iodesc) -{ - assert(iodesc); - - printf("ioid= %d\n", iodesc->ioid); -/* printf("async_id= %d\n",iodesc->async_id);*/ - printf("nrecvs= %d\n", iodesc->nrecvs); - printf("ndof= %d\n", iodesc->ndof); - printf("ndims= %d\n", iodesc->ndims); - printf("num_aiotasks= %d\n", iodesc->num_aiotasks); - printf("rearranger= %d\n", iodesc->rearranger); - printf("maxregions= %d\n", iodesc->maxregions); - printf("needsfill= %d\n", (int)iodesc->needsfill); - - printf("llen= %lld\n", iodesc->llen); - printf("maxiobuflen= %d\n", iodesc->maxiobuflen); - - printf("rindex= "); - for (int j = 0; j < iodesc->llen; j++) - printf(" %lld ", iodesc->rindex[j]); - printf("\n"); -} - /** * The box rearranger computes a mapping between IO tasks and compute - * tasks such that the data on io tasks can be written with a single - * call to the underlying netcdf library. This may involve an all to + * tasks such that the data on IO tasks can be written with a single + * call to the underlying netCDF library. This may involve an all to * all rearrangement in the mapping, but should minimize data movement - * in lower level libraries + * in lower level libraries. * - * @param ios pointer to the iosystem_desc_t struct - * @param maplen the length of the map - * @param compmap - * @param gsize pointer to an array of sizes - * @param ndims the number of dimensions - * @param iodesc a pointer to the io_desc_t struct. + * On each compute task the application program passes a compmap array + * of length ndof. This array describes the arrangement of data in + * memory on that compute task. + * + * These arrays are gathered and rearranged to the IO-tasks (which are + * sometimes collocated with compute tasks), each IO task contains + * data from the compmap of one or more compute tasks in the iomap + * array and the length of that array is llen. + * + * This function: + *
+ *
+ * <ul>
+ * <li>For IO tasks, determines llen.
+ * <li>Determines whether fill values will be needed.
+ * <li>Does an allgather of llen values into the array iomaplen.
+ * <li>For each IO task, sends starts/counts to all compute tasks.
+ * <li>Finds dest_ioindex and dest_ioproc for each element in the map.
+ * <li>Calls compute_counts().
+ * <li>On IO tasks, computes the max IO buffer size.
+ * <li>Calls compute_maxaggregate_bytes().
+ * </ul>
+ * + * @param ios pointer to the iosystem_desc_t struct. + * @param maplen the length of the map. This is the number of data + * elements on the compute task. + * @param compmap a 1 based array of offsets into the global space. A + * 0 in this array indicates a value which should not be transfered. + * @param gdimlen an array length ndims with the sizes of the global + * dimensions. + * @param ndims the number of dimensions. + * @param iodesc a pointer to the io_desc_t struct, which must be + * allocated before this function is called. * @returns 0 on success, error code otherwise. */ int box_rearrange_create(iosystem_desc_t *ios, int maplen, const PIO_Offset *compmap, - const int *gsize, int ndims, io_desc_t *iodesc) + const int *gdimlen, int ndims, io_desc_t *iodesc) { - int nprocs = ios->num_comptasks; - int nioprocs = ios->num_iotasks; - PIO_Offset gstride[ndims]; - PIO_Offset start[ndims], count[ndims]; - int tsize, i, j, k; - int dest_ioproc[maplen]; - PIO_Offset dest_ioindex[maplen]; - int sndlths[nprocs]; - int sdispls[nprocs]; - int recvlths[nprocs]; - int rdispls[nprocs]; - MPI_Datatype dtypes[nprocs]; - PIO_Offset iomaplen[nioprocs]; - int mpierr; /* Return code from MPI functions. */ int ret; - assert(iodesc); - + /* Check inputs. */ + pioassert(ios && maplen >= 0 && compmap && gdimlen && ndims > 0 && iodesc, + "invalid input", __FILE__, __LINE__); + LOG((1, "box_rearrange_create maplen = %d ndims = %d ios->num_comptasks = %d " + "ios->num_iotasks = %d", maplen, ndims, ios->num_comptasks, ios->num_iotasks)); + + /* Allocate arrays needed for this function. */ + int dest_ioproc[maplen]; /* Destination IO task for each data element on compute task. */ + PIO_Offset dest_ioindex[maplen]; /* Offset into IO task array for each data element. */ + int sendcounts[ios->num_uniontasks]; /* Send counts for swapm call. */ + int sdispls[ios->num_uniontasks]; /* Send displacements for swapm. */ + int recvcounts[ios->num_uniontasks]; /* Receive counts for swapm. */ + int rdispls[ios->num_uniontasks]; /* Receive displacements for swapm. */ + MPI_Datatype dtypes[ios->num_uniontasks]; /* Array of MPI_OFFSET types for swapm. */ + PIO_Offset iomaplen[ios->num_iotasks]; /* Gets the llen of all IO tasks. */ + + /* This is the box rearranger. */ iodesc->rearranger = PIO_REARR_BOX; + /* Number of elements of data on compute node. */ iodesc->ndof = maplen; - gstride[ndims-1] = 1; - for (int i= ndims - 2; i >= 0; i--) - gstride[i] = gstride[i + 1] * gsize[i + 1]; - - if ((mpierr = MPI_Type_size(MPI_OFFSET, &tsize))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - for (i = 0; i < maplen; i++) + /* Initialize array values. */ + for (int i = 0; i < maplen; i++) { dest_ioproc[i] = -1; dest_ioindex[i] = -1; } - for (i = 0; i < nprocs; i++) + + /* Initialize arrays used in swapm. */ + for (int i = 0; i < ios->num_uniontasks; i++) { - sndlths[i] = 0; + sendcounts[i] = 0; sdispls[i] = 0; - recvlths[i] = 0; + recvcounts[i] = 0; rdispls[i] = 0; dtypes[i] = MPI_OFFSET; } - iodesc->llen = 0; + + /* For IO tasks, determine llen, the length of the data array on + * the IO task. For computation tasks, llen will remain at 0. Also + * set up arrays for the allgather which will give every IO task a + * complete list of llens for each IO task. 
*/ + LOG((3, "ios->ioproc = %d ios->num_uniontasks = %d", ios->ioproc, + ios->num_uniontasks)); + pioassert(iodesc->llen == 0, "error", __FILE__, __LINE__); if (ios->ioproc) { - for (i = 0; i < nprocs; i++) - sndlths[i] = 1; - - /* llen here is the number that will be read on each io task */ + /* Set up send counts for sending llen in all to all + * gather. We are sending to all tasks, IO and computation. */ + for (int i = 0; i < ios->num_comptasks; i++) + sendcounts[ios->compranks[i]] = 1; + for (int i = 0; i < ios->num_iotasks; i++) + sendcounts[ios->ioranks[i]] = 1; + + /* Determine llen, the lenght of the data array on this IO + * node, by multipliying the counts in the + * iodesc->firstregion. */ iodesc->llen = 1; - for (i = 0; i < ndims; i++) + for (int i = 0; i < ndims; i++) + { iodesc->llen *= iodesc->firstregion->count[i]; + LOG((3, "iodesc->firstregion->start[%d] = %d iodesc->firstregion->count[%d] = %d", + i, iodesc->firstregion->start[i], i, iodesc->firstregion->count[i])); + } + LOG((2, "iodesc->llen = %d", iodesc->llen)); } - if ((ret = determine_fill(ios, iodesc, gsize, compmap))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - /* - if (ios->ioproc){ - for (i = 0; ifirstregion->start[i],iodesc->firstregion->count[i]); - } - printf("\n%s %d\n",__FILE__,__LINE__); - } - */ + /* Determine whether fill values will be needed. */ + if ((ret = determine_fill(ios, iodesc, gdimlen, compmap))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + LOG((2, "iodesc->needsfill = %d ios->num_iotasks = %d", iodesc->needsfill, + ios->num_iotasks)); - for (i = 0; i < nioprocs; i++) + /* Set up receive counts and displacements to for an AllToAll + * gather of llen. */ + for (int i = 0; i < ios->num_iotasks; i++) { - int io_comprank = ios->ioranks[i]; - recvlths[io_comprank] = 1; - rdispls[io_comprank] = i * tsize; + recvcounts[ios->ioranks[i]] = 1; + rdispls[ios->ioranks[i]] = i * SIZEOF_MPI_OFFSET; + LOG((3, "i = %d ios->ioranks[%d] = %d recvcounts[%d] = %d rdispls[%d] = %d", + i, i, ios->ioranks[i], ios->ioranks[i], recvcounts[ios->ioranks[i]], + ios->ioranks[i], rdispls[ios->ioranks[i]])); } - /* The length of each iomap - iomaplen = calloc(nioprocs, sizeof(PIO_Offset)); */ - pio_swapm(&(iodesc->llen), sndlths, sdispls, dtypes, - iomaplen, recvlths, rdispls, dtypes, - ios->union_comm, - iodesc->rearr_opts.comm_fc_opts_io2comp.enable_hs, - iodesc->rearr_opts.comm_fc_opts_io2comp.enable_isend, - iodesc->rearr_opts.comm_fc_opts_io2comp.max_pend_req); - - for (i = 0; i < nioprocs; i++) + /* All-gather the llen to all tasks into array iomaplen. */ + LOG((3, "calling pio_swapm to allgather llen into array iomaplen, ndims = %d dtypes[0] = %d", + ndims, dtypes)); + if ((ret = pio_swapm(&iodesc->llen, sendcounts, sdispls, dtypes, iomaplen, recvcounts, + rdispls, dtypes, ios->union_comm, &iodesc->rearr_opts.io2comp))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + LOG((3, "iodesc->llen = %d", iodesc->llen)); +#if PIO_ENABLE_LOGGING + for (int i = 0; i < ios->num_iotasks; i++) + LOG((3, "iomaplen[%d] = %d", i, iomaplen[i])); +#endif /* PIO_ENABLE_LOGGING */ + + /* For each IO task send starts/counts to all compute tasks. */ + for (int i = 0; i < ios->num_iotasks; i++) { + /* The ipmaplen contains the llen (number of data elements) + * for this IO task. */ + LOG((2, "iomaplen[%d] = %d", i, iomaplen[i])); + /* If there is data for this IO task, send start/count to all + * compute tasks. 
*/ if (iomaplen[i] > 0) { - int io_comprank = ios->ioranks[i]; - for (j = 0; j < nprocs; j++) + PIO_Offset start[ndims]; + PIO_Offset count[ndims]; + + /* Set up send/recv parameters for all to all gather of + * counts and starts. */ + for (int j = 0; j < ios->num_uniontasks; j++) { - sndlths[j] = 0; + sendcounts[j] = 0; sdispls[j] = 0; rdispls[j] = 0; - recvlths[j] = 0; - if (ios->union_rank == io_comprank) - sndlths[j] = ndims; + recvcounts[j] = 0; + if (ios->union_rank == ios->ioranks[i]) + sendcounts[j] = ndims; } - recvlths[io_comprank] = ndims; - - /* The count from iotask i is sent to all compute tasks */ - pio_swapm(iodesc->firstregion->count, sndlths, sdispls, dtypes, - count, recvlths, rdispls, dtypes, - ios->union_comm, - iodesc->rearr_opts.comm_fc_opts_io2comp.enable_hs, - iodesc->rearr_opts.comm_fc_opts_io2comp.enable_isend, - iodesc->rearr_opts.comm_fc_opts_io2comp.max_pend_req); - - /* The start from iotask i is sent to all compute tasks. */ - pio_swapm(iodesc->firstregion->start, sndlths, sdispls, dtypes, - start, recvlths, rdispls, dtypes, - ios->union_comm, - iodesc->rearr_opts.comm_fc_opts_io2comp.enable_hs, - iodesc->rearr_opts.comm_fc_opts_io2comp.enable_isend, - iodesc->rearr_opts.comm_fc_opts_io2comp.max_pend_req); - - for (k = 0; k < maplen; k++) + recvcounts[ios->ioranks[i]] = ndims; + + /* The count array from iotask i is sent to all compute tasks. */ + LOG((3, "about to call pio_swapm with count from iotask %d ndims = %d", + i, ndims)); + if ((ret = pio_swapm(iodesc->firstregion->count, sendcounts, sdispls, dtypes, count, + recvcounts, rdispls, dtypes, ios->union_comm, + &iodesc->rearr_opts.io2comp))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + + /* The start array from iotask i is sent to all compute tasks. */ + LOG((3, "about to call pio_swapm with start from iotask %d ndims = %d", + i, ndims)); + if ((ret = pio_swapm(iodesc->firstregion->start, sendcounts, sdispls, dtypes, + start, recvcounts, rdispls, dtypes, ios->union_comm, + &iodesc->rearr_opts.io2comp))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + +#if PIO_ENABLE_LOGGING + for (int d = 0; d < ndims; d++) + LOG((3, "start[%d] = %lld count[%d] = %lld", d, start[d], d, count[d])); +#endif /* PIO_ENABLE_LOGGING */ + + /* For each element of the data array on the compute task, + * find the IO task to send the data element to, and its + * offset into the global data array. */ + for (int k = 0; k < maplen; k++) { PIO_Offset gcoord[ndims], lcoord[ndims]; bool found = true; - /* The compmap array is 1 based but calculations are 0 based */ - idx_to_dim_list(ndims, gsize, compmap[k] - 1, gcoord); - for (j = 0; j < ndims; j++) + /* The compmap array is 1 based but calculations are 0 based */ + LOG((3, "about to call idx_to_dim_list ndims = %d ", ndims)); + idx_to_dim_list(ndims, gdimlen, compmap[k] - 1, gcoord); +#if PIO_ENABLE_LOGGING + for (int d = 0; d < ndims; d++) + LOG((3, "gcoord[%d] = %lld", d, gcoord[d])); +#endif /* PIO_ENABLE_LOGGING */ + + /* Find a destination for each entry in the compmap. */ + for (int j = 0; j < ndims; j++) { if (gcoord[j] >= start[j] && gcoord[j] < start[j] + count[j]) { @@ -1248,44 +1336,56 @@ int box_rearrange_create(iosystem_desc_t *ios, int maplen, const PIO_Offset *com break; } } + + /* Did we find a destination IO task for this element + * of the computation task data array? If so, remember + * the destination IO task, and determine the index + * for that element in the IO task data. 
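+                 * Continuing the illustrative 4 x 4 example above: a
+                 * compute element with gcoord = {3, 1} lies in IO task 1's
+                 * region (start = {2, 0}, count = {2, 4}), so lcoord =
+                 * {1, 1} and the row-major offset within that region is
+                 * dest_ioindex = 1 * 4 + 1 = 5.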
*/ if (found) { dest_ioindex[k] = coord_to_lindex(ndims, lcoord, count); dest_ioproc[k] = i; + LOG((3, "found dest_ioindex[%d] = %d dest_ioproc[%d] = %d", k, dest_ioindex[k], + k, dest_ioproc[k])); } } } } /* Check that a destination is found for each compmap entry. */ - for (k = 0; k < maplen; k++) + for (int k = 0; k < maplen; k++) if (dest_ioproc[k] < 0 && compmap[k] > 0) return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); - compute_counts(ios, iodesc, maplen, dest_ioproc, dest_ioindex, ios->union_comm); + /* Completes the mapping for the box rearranger. */ + LOG((2, "calling compute_counts maplen = %d", maplen)); + if ((ret = compute_counts(ios, iodesc, dest_ioproc, dest_ioindex))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + /* Compute the max io buffer size needed for an iodesc. */ if (ios->ioproc) + { if ((ret = compute_maxIObuffersize(ios->io_comm, iodesc))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); + LOG((3, "iodesc->maxiobuflen = %d", iodesc->maxiobuflen)); + } - /* Using maxiobuflen compute the maximum number of vars of this type that the io - task buffer can handle. */ + /* Using maxiobuflen compute the maximum number of bytes that the + * io task buffer can handle. */ if ((ret = compute_maxaggregate_bytes(ios, iodesc))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); + LOG((3, "iodesc->maxbytes = %d", iodesc->maxbytes)); -#ifdef DEBUG - iodesc_dump(iodesc); -#endif return PIO_NOERR; } /** - * Compare offsets is used by the sort in the subset rearrange. This + * Compare offsets is used by the sort in the subset rearranger. This * function is passed to qsort. * - * @param a - * @param b - * @returns 0 if offsets are the same. + * @param a pointer to an offset. + * @param b pointer to another offset. + * @returns 0 if offsets are the same or either pointer is NULL. */ int compare_offsets(const void *a, const void *b) { @@ -1297,60 +1397,75 @@ int compare_offsets(const void *a, const void *b) } /** + * Calculate start and count regions for the subset rearranger. This + * function is not used in the box rearranger. + * * Each region is a block of output which can be represented in a * single call to the underlying netcdf library. This can be as small * as a single data point, but we hope we've aggragated better than * that. * * @param ndims the number of dimensions - * @param gdims pointer to an array of dimension ids + * @param gdimlen an array length ndims with the sizes of the global + * dimensions. * @param maplen the length of the map - * @param map + * @param map may be NULL (when ???). * @param maxregions * @param firstregion pointer to the first region. * @returns 0 on success, error code otherwise. */ -void get_start_and_count_regions(int ndims, const int *gdims, int maplen, const PIO_Offset *map, - int *maxregions, io_region *firstregion) +int get_regions(int ndims, const int *gdimlen, int maplen, const PIO_Offset *map, + int *maxregions, io_region *firstregion) { - int i; - int nmaplen; + int nmaplen = 0; int regionlen; io_region *region; + int ret; - assert(maxregions); - assert(firstregion); + /* Check inputs. 
*/ + pioassert(ndims >= 0 && gdimlen && maplen >= 0 && maxregions && firstregion, + "invalid input", __FILE__, __LINE__); + LOG((1, "get_regions ndims = %d maplen = %d", ndims, maplen)); - nmaplen = 0; region = firstregion; if (map) { - while(map[nmaplen++] <= 0); + while (map[nmaplen++] <= 0) + { + LOG((3, "map[%d] = %d", nmaplen, map[nmaplen])); + ; + } nmaplen--; } region->loffset = nmaplen; + LOG((2, "region->loffset = %d", region->loffset)); *maxregions = 1; - while(nmaplen < maplen) + while (nmaplen < maplen) { /* Here we find the largest region from the current offset - into the iomap regionlen is the size of that region and we + into the iomap. regionlen is the size of that region and we step to that point in the map array until we reach the - end */ - for (i = 0; i < ndims; i++) + end. */ + for (int i = 0; i < ndims; i++) region->count[i] = 1; - regionlen = find_region(ndims, gdims, maplen-nmaplen, - map+nmaplen, region->start, region->count); - + /* Set start/count to describe first region in map. */ + regionlen = find_region(ndims, gdimlen, maplen-nmaplen, + &map[nmaplen], region->start, region->count); pioassert(region->start[0] >= 0, "failed to find region", __FILE__, __LINE__); nmaplen = nmaplen + regionlen; + LOG((2, "regionlen = %d nmaplen = %d", regionlen, nmaplen)); + /* If we need to, allocate the next region. */ if (region->next == NULL && nmaplen < maplen) { - region->next = alloc_region(ndims); + LOG((2, "allocating next region")); + if ((ret = alloc_region2(NULL, ndims, ®ion->next))) + return ret; + /* The offset into the local array buffer is the sum of * the sizes of all of the previous regions (loffset) */ region = region->next; @@ -1361,28 +1476,39 @@ void get_start_and_count_regions(int ndims, const int *gdims, int maplen, const maxregions will be the total number of regions on this task. */ (*maxregions)++; + LOG((2, "*maxregions = %d", *maxregions)); } } + + return PIO_NOERR; } /** + * Create the MPI communicators needed by the subset rearranger. + * * The subset rearranger needs a mapping from compute tasks to IO * task, the only requirement is that each compute task map to one and * only one IO task. This mapping groups by mpi task id others are * possible and may be better for certain decompositions * - * @param ios pointer to the iosystem_desc_t struct + * The as yet unrealized vision here is that the user would be able to + * supply an alternative subset partitioning function. Requirements of + * this function are that there be exactly one io task per compute + * task group. + * + * @param ios pointer to the iosystem_desc_t struct. * @param iodesc a pointer to the io_desc_t struct. * @returns 0 on success, error code otherwise. */ int default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc) { - int taskratio = ios->num_comptasks/ios->num_iotasks; int color; int key; int mpierr; /* Return value from MPI functions. 
*/ - assert(ios && iodesc); + pioassert(ios && iodesc, "invalid input", __FILE__, __LINE__); + LOG((1, "default_subset_partition ios->ioproc = %d ios->io_rank = %d " + "ios->comp_rank = %d", ios->ioproc, ios->io_rank, ios->comp_rank)); /* Create a new comm for each subset group with the io task in rank 0 and only 1 io task per group */ @@ -1393,10 +1519,13 @@ int default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc) } else { + int taskratio = ios->num_comptasks / ios->num_iotasks; key = max(1, ios->comp_rank % taskratio + 1); color = min(ios->num_iotasks - 1, ios->comp_rank / taskratio); } + LOG((3, "key = %d color = %d", key, color)); + /* Create new communicators. */ if ((mpierr = MPI_Comm_split(ios->comp_comm, color, key, &iodesc->subset_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); @@ -1404,34 +1533,72 @@ int default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc) } /** + * Create the subset rearranger. + * * The subset rearranger computes a mapping between IO tasks and * compute tasks such that each compute task communicates with one and * only one IO task. * + * This function is called from PIOc_InitDecomp(). + * + * This function: + *
    + *
  • Calls default_subset_partition() to create subset_comm. + *
  • For IO tasks, allocates iodesc->rcount array (length ntasks). + *
  • Allocates iodesc->scount array (length 1) + *
  • Determines the value of iodesc->scount[0], the number of data
+ * elements on this compute task which are read/written.
+ *
  • Allocates and initializes iodesc->sindex (length iodesc->scount[0]),
+ * initializing it to contain indices into the data.
+ *
  • Pass the reduced maplen (without holes) from each compute task + * to its associated IO task. + *
  • On IO tasks, determine llen. + *
  • Determine whether fill values will be needed. + *
  • Pass iodesc->sindex from each compute task to its associated IO + * task. + *
  • Create shrtmap, which is compmap without the holes. + *
  • Gather shrtmaps from each task into iomap. + *
  • On IO tasks, sort the mapping, this will transpose the data + * into IO order. + *
  • On IO tasks, allocate and init iodesc->rindex and iodesc->rfrom + * (length iodesc->llen). + *
  • On IO tasks, handle fill values, if needed. + *
  • On IO tasks, scatter values of srcindex to subset communicator. + *
  • On IO tasks, call get_regions() and distribute the max + * maxregions to all tasks in IO communicator. + *
  • On IO tasks, call compute_maxIObuffersize(). + *
  • Call compute_maxaggregate_bytes(). + *
+ * * @param ios pointer to the iosystem_desc_t struct. * @param maplen the length of the map. - * @param compmap - * @param gsize pointer to an array of sizes. + * @param compmap a 1 based array of offsets into the array record on + * file. A 0 in this array indicates a value which should not be + * transfered. + * @param gdimlen an array length ndims with the sizes of the global + * dimensions. * @param ndims the number of dimensions. * @param iodesc a pointer to the io_desc_t struct. * @returns 0 on success, error code otherwise. */ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap, - const int *gsize, int ndims, io_desc_t *iodesc) + const int *gdimlen, int ndims, io_desc_t *iodesc) { int i, j; PIO_Offset *iomap = NULL; - int ierr = PIO_NOERR; mapsort *map = NULL; PIO_Offset totalgridsize; PIO_Offset *srcindex = NULL; PIO_Offset *myfillgrid = NULL; int maxregions; - int rank, ntasks, rcnt; + int rank, ntasks; + int rcnt = 0; int mpierr; /* Return call from MPI function calls. */ int ret; - pioassert(iodesc, "iodesc must be provided", __FILE__, __LINE__); + /* Check inputs. */ + pioassert(ios && maplen >= 0 && compmap && gdimlen && ndims >= 0 && iodesc, + "invalid input", __FILE__, __LINE__); LOG((2, "subset_rearrange_create maplen = %d ndims = %d", maplen, ndims)); @@ -1442,7 +1609,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma return pio_err(ios, NULL, ret, __FILE__, __LINE__); iodesc->rearranger = PIO_REARR_SUBSET; - /* Get size of this subset communicator, and rank in it. */ + /* Get size of this subset communicator and rank of this task in it. */ if ((mpierr = MPI_Comm_rank(iodesc->subset_comm, &rank))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); if ((mpierr = MPI_Comm_size(iodesc->subset_comm, &ntasks))) @@ -1455,8 +1622,9 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma pioassert(rank > 0 && rank < ntasks, "Bad comp rank in subset create", __FILE__, __LINE__); - rcnt = 0; + /* Remember the maplen for this computation task. */ iodesc->ndof = maplen; + if (ios->ioproc) { /* Allocate space to hold count of data to be received in pio_swapm(). */ @@ -1471,10 +1639,15 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); iodesc->scount[0] = 0; + + /* Find the total size of the global data array. */ totalgridsize = 1; for (i = 0; i < ndims; i++) - totalgridsize *= gsize[i]; + totalgridsize *= gdimlen[i]; + /* Determine scount[0], the number of data elements in the + * computation task that are to be written, by looking at + * compmap. */ for (i = 0; i < maplen; i++) { /* turns out this can be allowed in some cases @@ -1484,6 +1657,8 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma (iodesc->scount[0])++; } + /* Allocate an array for indicies on the computation tasks (the + * send side when writing). 
*/ if (iodesc->scount[0] > 0) if (!(iodesc->sindex = calloc(iodesc->scount[0], sizeof(PIO_Offset)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); @@ -1493,24 +1668,25 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma if (compmap[i] > 0) iodesc->sindex[j++] = i; - /* Pass the reduced maplen (without holes) from each compute task to its associated IO task - printf("%s %d %ld\n",__FILE__,__LINE__,iodesc->scount); */ - if ((mpierr = MPI_Gather(iodesc->scount, 1, MPI_INT, iodesc->rcount, rcnt, MPI_INT, - 0, iodesc->subset_comm))) + /* Pass the reduced maplen (without holes) from each compute task + * to its associated IO task. */ + if ((mpierr = MPI_Gather(iodesc->scount, 1, MPI_INT, iodesc->rcount, rcnt, + MPI_INT, 0, iodesc->subset_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); iodesc->llen = 0; int rdispls[ntasks]; - int recvlths[ntasks]; + int recvcounts[ntasks]; + /* On IO tasks determine llen. */ if (ios->ioproc) { - for (i = 0;i < ntasks; i++) + for (i = 0; i < ntasks; i++) { iodesc->llen += iodesc->rcount[i]; rdispls[i] = 0; - recvlths[i] = iodesc->rcount[i]; + recvcounts[i] = iodesc->rcount[i]; if (i > 0) rdispls[i] = rdispls[i - 1] + iodesc->rcount[i - 1]; } @@ -1528,20 +1704,24 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma { for (i = 0; i < ntasks; i++) { - recvlths[i] = 0; + recvcounts[i] = 0; rdispls[i] = 0; } } - if ((ret = determine_fill(ios, iodesc, gsize, compmap))) + + /* Determine whether fill values will be needed. */ + if ((ret = determine_fill(ios, iodesc, gdimlen, compmap))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); /* Pass the sindex from each compute task to its associated IO task. */ if ((mpierr = MPI_Gatherv(iodesc->sindex, iodesc->scount[0], PIO_OFFSET, - srcindex, recvlths, rdispls, PIO_OFFSET, 0, + srcindex, recvcounts, rdispls, PIO_OFFSET, 0, iodesc->subset_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if (ios->ioproc && iodesc->llen>0) + /* On IO tasks which need it, allocate memory for the map and the + * iomap. */ + if (ios->ioproc && iodesc->llen > 0) { if (!(map = calloc(iodesc->llen, sizeof(mapsort)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); @@ -1552,13 +1732,13 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma /* Now pass the compmap, skipping the holes. */ PIO_Offset *shrtmap; - if (maplen>iodesc->scount[0] && iodesc->scount[0] > 0) + if (maplen > iodesc->scount[0] && iodesc->scount[0] > 0) { if (!(shrtmap = calloc(iodesc->scount[0], sizeof(PIO_Offset)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); j = 0; - for (i = 0; i < maplen; i++) + for (int i = 0; i < maplen; i++) if (compmap[i] > 0) shrtmap[j++] = compmap[i]; } @@ -1567,13 +1747,16 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma shrtmap = compmap; } - if ((mpierr = MPI_Gatherv(shrtmap, iodesc->scount[0], PIO_OFFSET, iomap, recvlths, rdispls, - PIO_OFFSET, 0, iodesc->subset_comm))) + /* Gather shrtmap from each task in the subset communicator, and + * put gathered results into iomap. */ + if ((mpierr = MPI_Gatherv(shrtmap, iodesc->scount[0], PIO_OFFSET, iomap, recvcounts, + rdispls, PIO_OFFSET, 0, iodesc->subset_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); if (shrtmap != compmap) free(shrtmap); + /* On IO tasks that have data in the local array ??? 
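+     * (A reading of the loop below: each received offset is paired with
+     * the subset rank it came from in a mapsort entry; e.g. iomap values
+     * {7, 2, 9} received from ranks {0, 1, 1} sort into {2, 7, 9} with
+     * rfrom = {1, 0, 1}, which transposes the data into IO order.)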
*/ if (ios->ioproc && iodesc->llen > 0) { int pos = 0; @@ -1583,7 +1766,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma { for (j = 0; j < iodesc->rcount[i]; j++) { - mptr = map + k; + mptr = &map[k]; mptr->rfrom = i; mptr->soffset = srcindex[pos + j]; mptr->iomap = iomap[pos + j]; @@ -1591,6 +1774,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma } pos += iodesc->rcount[i]; } + /* sort the mapping, this will transpose the data into IO order */ qsort(map, iodesc->llen, sizeof(mapsort), compare_offsets); @@ -1605,21 +1789,20 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma for (i = 0; i < ntasks; i++) { cnt[i] = rdispls[i]; - - /* offsets to swapm are in bytes */ - /* rdispls[i]*=pio_offset_size; */ } - mapsort *mptr; + /* For IO tasks init rfrom and rindex arrays (compute tasks have + * llen of 0). */ for (i = 0; i < iodesc->llen; i++) { - mptr = map+i; + mapsort *mptr = &map[i]; iodesc->rfrom[i] = mptr->rfrom; iodesc->rindex[i] = i; iomap[i] = mptr->iomap; srcindex[(cnt[iodesc->rfrom[i]])++] = mptr->soffset; } + /* Handle fill values if needed. */ if (ios->ioproc && iodesc->needsfill) { /* we need the list of offsets which are not in the union of iomap */ @@ -1657,6 +1840,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma } } + /* Gather cnt from all tasks in the IO communicator into array gcnt. */ if ((mpierr = MPI_Gather(&cnt, 1, MPI_INT, gcnt, 1, MPI_INT, nio, ios->io_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); @@ -1675,11 +1859,12 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma myusegrid[i] = -1; } - if ((mpierr = MPI_Gatherv((iomap + imin), cnt, PIO_OFFSET, myusegrid, gcnt, + if ((mpierr = MPI_Gatherv(&iomap[imin], cnt, PIO_OFFSET, myusegrid, gcnt, displs, PIO_OFFSET, nio, ios->io_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } + /* Allocate and initialize a grid to fill in missing values. ??? */ PIO_Offset grid[thisgridsize[ios->io_rank]]; for (i = 0; i < thisgridsize[ios->io_rank]; i++) grid[i] = 0; @@ -1688,7 +1873,8 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma for (i = 0; i < thisgridsize[ios->io_rank]; i++) { int j = myusegrid[i] - thisgridmin[ios->io_rank]; - pioassert(j < thisgridsize[ios->io_rank], "out of bounds array index", __FILE__, __LINE__); + pioassert(j < thisgridsize[ios->io_rank], "out of bounds array index", + __FILE__, __LINE__); if (j >= 0) { grid[j] = 1; @@ -1698,7 +1884,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma if (myusegrid) free(myusegrid); - iodesc->holegridsize=thisgridsize[ios->io_rank] - cnt; + iodesc->holegridsize = thisgridsize[ios->io_rank] - cnt; if (iodesc->holegridsize > 0) { /* Allocate space for the fillgrid. 
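             * (Illustrative: if this IO task's slice covers 10 global
             * offsets but the gathered iomap only references 8 of them,
             * then holegridsize = 2 and myfillgrid will hold the two
             * unreferenced offsets.)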
*/ @@ -1716,7 +1902,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma if (grid[i] == 0) { if (myfillgrid[j] == -1) - myfillgrid[j++]=thisgridmin[ios->io_rank] + i; + myfillgrid[j++] = thisgridmin[ios->io_rank] + i; else return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); } @@ -1725,34 +1911,52 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma iodesc->maxfillregions = 0; if (myfillgrid) { - iodesc->fillregion = alloc_region(iodesc->ndims); - get_start_and_count_regions(iodesc->ndims, gsize, iodesc->holegridsize, myfillgrid, - &iodesc->maxfillregions, iodesc->fillregion); + /* Allocate a data region to hold fill values. */ + if ((ret = alloc_region2(ios, iodesc->ndims, &iodesc->fillregion))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + if ((ret = get_regions(iodesc->ndims, gdimlen, iodesc->holegridsize, myfillgrid, + &iodesc->maxfillregions, iodesc->fillregion))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); free(myfillgrid); maxregions = iodesc->maxfillregions; } - if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &maxregions, 1, MPI_INT, MPI_MAX, ios->io_comm))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); + /* Get the max maxregions, and distribute it to all tasks in + * the IO communicator. */ + if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &maxregions, 1, MPI_INT, MPI_MAX, + ios->io_comm))) + return check_mpi(NULL, mpierr, __FILE__, __LINE__); iodesc->maxfillregions = maxregions; + /* Get the max maxholegridsize, and distribute it to all tasks + * in the IO communicator. */ iodesc->maxholegridsize = iodesc->holegridsize; - if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &(iodesc->maxholegridsize), 1, MPI_INT, MPI_MAX, ios->io_comm))) + if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &(iodesc->maxholegridsize), 1, MPI_INT, + MPI_MAX, ios->io_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } - if ((mpierr = MPI_Scatterv((void *)srcindex, recvlths, rdispls, PIO_OFFSET, (void *)iodesc->sindex, - iodesc->scount[0], PIO_OFFSET, 0, iodesc->subset_comm))) + /* Scatter values of srcindex to subset communicator. ??? */ + if ((mpierr = MPI_Scatterv((void *)srcindex, recvcounts, rdispls, PIO_OFFSET, + (void *)iodesc->sindex, iodesc->scount[0], PIO_OFFSET, + 0, iodesc->subset_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); + if (ios->ioproc) { iodesc->maxregions = 0; - get_start_and_count_regions(iodesc->ndims,gsize,iodesc->llen, iomap,&(iodesc->maxregions), - iodesc->firstregion); + if ((ret = get_regions(iodesc->ndims, gdimlen, iodesc->llen, iomap, + &iodesc->maxregions, iodesc->firstregion))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); maxregions = iodesc->maxregions; + + /* Get the max maxregions, and distribute it to all tasks in + * the IO communicator. */ if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &maxregions, 1, MPI_INT, MPI_MAX, ios->io_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); iodesc->maxregions = maxregions; + + /* Free resources. */ if (iomap) free(iomap); @@ -1762,12 +1966,11 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma if (srcindex) free(srcindex); - compute_maxIObuffersize(ios->io_comm, iodesc); + /* Compute the max io buffer size needed for an iodesc. 
*/ + if ((ret = compute_maxIObuffersize(ios->io_comm, iodesc))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); iodesc->nrecvs = ntasks; -#ifdef DEBUG - iodesc_dump(iodesc); -#endif } /* Using maxiobuflen compute the maximum number of vars of this type that the io @@ -1775,7 +1978,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma if ((ret = compute_maxaggregate_bytes(ios, iodesc))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); - return ierr; + return PIO_NOERR; } /** @@ -1803,11 +2006,11 @@ void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc) ibuf = NULL; if (iodesc->ndof > 0) if (!(cbuf = bget(iodesc->ndof * tsize))) - piomemerror(ios, iodesc->ndof * tsize, __FILE__, __LINE__); + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); if (iodesc->llen > 0) if (!(ibuf = bget(iodesc->llen * tsize))) - piomemerror(ios, iodesc->llen * tsize, __FILE__, __LINE__); + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); if (iodesc->rearranger == PIO_REARR_BOX) mycomm = ios->union_comm; @@ -1821,7 +2024,7 @@ void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc) int log2 = log(nprocs) / log(2) + 1; if (!(wall = bget(2 * 4 * log2 * sizeof(double)))) - piomemerror(ios, 2 * 4 *log2 * sizeof(double), __FILE__, __LINE__); + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); double mintime; int k = 0; @@ -1835,16 +2038,16 @@ void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc) if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &mintime, 1, MPI_DOUBLE, MPI_MAX, mycomm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); - handshake = iodesc->rearr_opts.comm_fc_opts_comp2io.enable_hs; + handshake = iodesc->rearr_opts.comp2io.hs; isend = iodesc->isend; maxreqs = iodesc->max_requests; for (int i = 0; i < 2; i++) { if (i == 0) - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_hs = false; + iodesc->rearr_opts.comp2io.hs = false; else - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_hs = true; + iodesc->rearr_opts.comp2io.hs = true; for (int j = 0; j < 2; j++) { @@ -1871,7 +2074,7 @@ void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc) if (wall[1] < mintime * 0.95) { - handshake = iodesc->rearr_opts.comm_fc_opts_comp2io.enable_hs; + handshake = iodesc->rearr_opts.comp2io.hs; isend = iodesc->isend; maxreqs = nreqs; mintime = wall[1]; @@ -1884,7 +2087,7 @@ void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc) } } - iodesc->rearr_opts.comm_fc_opts_comp2io.enable_hs = handshake; + iodesc->rearr_opts.comp2io.hs = handshake; iodesc->isend = isend; iodesc->max_requests = maxreqs; diff --git a/src/externals/pio2/src/clib/pio_spmd.c b/src/externals/pio2/src/clib/pio_spmd.c index decf3f7e3bb1..c42fd60eff90 100644 --- a/src/externals/pio2/src/clib/pio_spmd.c +++ b/src/externals/pio2/src/clib/pio_spmd.c @@ -67,19 +67,12 @@ int pair(int np, int p, int k) * @param recvtypes array of datatypes (of length ntasks). Entry i * specifies the type of data received from process i. * @param comm MPI communicator for the MPI_Alltoallw call. - * @param handshake if true, use handshaking. - * @param isend the isend bool indicates whether sends should be - * posted using mpi_irsend which can be faster than blocking - * sends. When flow control is used max_requests > 0 and the number of - * irecvs posted from a given task will not exceed this value. On some - * networks too many outstanding irecvs will cause a communications - * bottleneck. 
- * @param max_requests If 0, no flow control is used. + * @param fc pointer to the struct that provided flow control options. * @returns 0 for success, error code otherwise. */ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes, void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes, - MPI_Comm comm, bool handshake, bool isend, int max_requests) + MPI_Comm comm, rearr_comm_fc_opt_t *fc) { int ntasks; /* Number of tasks in communicator comm. */ int my_rank; /* Rank of this task in comm. */ @@ -96,8 +89,8 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty MPI_Status status; /* Not actually used - replace with MPI_STATUSES_IGNORE. */ int mpierr; /* Return code from MPI functions. */ - LOG((2, "pio_swapm handshake = %d isend = %d max_requests = %d", handshake, - isend, max_requests)); + LOG((2, "pio_swapm fc->hs = %d fc->isend = %d fc->max_pend_req = %d", fc->hs, + fc->isend, fc->max_pend_req)); /* Get my rank and size of communicator. */ if ((mpierr = MPI_Comm_size(comm, &ntasks))) @@ -117,23 +110,15 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty #if PIO_ENABLE_LOGGING { for (int p = 0; p < ntasks; p++) - LOG((3, "sendcounts[%d] = %d", p, sendcounts[p])); - for (int p = 0; p < ntasks; p++) - LOG((3, "sdispls[%d] = %d", p, sdispls[p])); - for (int p = 0; p < ntasks; p++) - LOG((3, "sendtypes[%d] = %d", p, sendtypes[p])); - for (int p = 0; p < ntasks; p++) - LOG((3, "recvcounts[%d] = %d", p, recvcounts[p])); - for (int p = 0; p < ntasks; p++) - LOG((3, "rdispls[%d] = %d", p, rdispls[p])); - for (int p = 0; p < ntasks; p++) - LOG((3, "recvtypes[%d] = %d", p, recvtypes[p])); + LOG((3, "sendcounts[%d] = %d sdispls[%d] = %d sendtypes[%d] = %d recvcounts[%d] = %d " + "rdispls[%d] = %d recvtypes[%d] = %d", p, sendcounts[p], p, sdispls[p], p, + sendtypes[p], p, recvcounts[p], p, rdispls[p], p, recvtypes[p])); } #endif /* PIO_ENABLE_LOGGING */ - /* If max_requests == 0 no throttling is requested and the default + /* If fc->max_pend_req == 0 no throttling is requested and the default * mpi_alltoallw function is used. */ - if (max_requests == 0) + if (fc->max_pend_req == 0) { /* Call the MPI alltoall without flow control. 
*/ LOG((3, "Calling MPI_Alltoallw without flow control.")); @@ -194,11 +179,11 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty swapids[i] = 0; } - if (isend) + if (fc->isend) for (int i = 0; i < ntasks; i++) sndids[i] = MPI_REQUEST_NULL; - if (handshake) + if (fc->hs) for (int i = 0; i < ntasks; i++) hs_rcvids[i] = MPI_REQUEST_NULL; @@ -222,17 +207,17 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty } else { - if (max_requests == PIO_REARR_COMM_UNLIMITED_PEND_REQ) + if (fc->max_pend_req == PIO_REARR_COMM_UNLIMITED_PEND_REQ) { maxreq = steps; maxreqh = steps; } - else if (max_requests > 1 && max_requests < steps) + else if (fc->max_pend_req > 1 && fc->max_pend_req < steps) { - maxreq = max_requests; + maxreq = fc->max_pend_req; maxreqh = maxreq / 2; } - else if (max_requests == 1) + else if (fc->max_pend_req == 1) { /* Note that steps >= 2 here */ maxreq = 2; @@ -245,11 +230,11 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty } } - LOG((2, "max_requests=%d, maxreq=%d, maxreqh=%d", max_requests, maxreq, maxreqh)); + LOG((2, "fc->max_pend_req=%d, maxreq=%d, maxreqh=%d", fc->max_pend_req, maxreq, maxreqh)); /* If handshaking is in use, do a nonblocking recieve to listen * for it. */ - if (handshake) + if (fc->hs) { for (istep = 0; istep < maxreq; istep++) { @@ -276,7 +261,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty rcvids + istep))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if (handshake) + if (fc->hs) if ((mpierr = MPI_Send(&hs, 1, MPI_INT, p, tag, comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } @@ -292,7 +277,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty tag = my_rank + offset_t; /* If handshake is enabled don't post sends until the * receiving task has posted recvs. 
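             * (Here the receiver posts its MPI_Irecv for the data and then
             * sends a one-integer token; the sender waits on that token
             * before posting the matching send.)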
*/ - if (handshake) + if (fc->hs) { if ((mpierr = MPI_Wait(hs_rcvids + istep, &status))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); @@ -308,7 +293,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty * used to choose between mpi_irsends and mpi_isends - the default * is still mpi_irsend */ - if (handshake && isend) + if (fc->hs && fc->isend) { #ifdef USE_MPI_ISEND_FOR_FC if ((mpierr = MPI_Isend(ptr, sendcounts[p], sendtypes[p], p, tag, comm, @@ -320,7 +305,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty return check_mpi(NULL, mpierr, __FILE__, __LINE__); #endif } - else if (isend) + else if (fc->isend) { if ((mpierr = MPI_Isend(ptr, sendcounts[p], sendtypes[p], p, tag, comm, sndids + istep))) @@ -347,7 +332,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty if (rstep < steps) { p = swapids[rstep]; - if (handshake && sendcounts[p] > 0) + if (fc->hs && sendcounts[p] > 0) { tag = my_rank + offset_t; if ((mpierr = MPI_Irecv(&hs, 1, MPI_INT, p, tag, comm, hs_rcvids+rstep))) @@ -360,7 +345,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty ptr = (char *)recvbuf + rdispls[p]; if ((mpierr = MPI_Irecv(ptr, recvcounts[p], recvtypes[p], p, tag, comm, rcvids + rstep))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if (handshake) + if (fc->hs) if ((mpierr = MPI_Send(&hs, 1, MPI_INT, p, tag, comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } @@ -376,7 +361,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty LOG((2, "Waiting for outstanding msgs")); if ((mpierr = MPI_Waitall(steps, rcvids, MPI_STATUSES_IGNORE))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if (isend) + if (fc->isend) if ((mpierr = MPI_Waitall(steps, sndids, MPI_STATUSES_IGNORE))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } diff --git a/src/externals/pio2/src/clib/pio_varm.c b/src/externals/pio2/src/clib/pio_varm.c index 18d55c5aea88..f02cb196c911 100644 --- a/src/externals/pio2/src/clib/pio_varm.c +++ b/src/externals/pio2/src/clib/pio_varm.c @@ -29,7 +29,7 @@ int PIOc_put_varm (int ncid, int varid, const PIO_Offset start[], const PIO_Offs ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -98,7 +98,7 @@ int PIOc_put_varm_uchar (int ncid, int varid, const PIO_Offset start[], const PI ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -167,7 +167,7 @@ int PIOc_put_varm_short (int ncid, int varid, const PIO_Offset start[], const PI ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -235,7 +235,7 @@ int PIOc_put_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -306,7 +306,7 @@ int PIOc_put_varm_ushort (int ncid, int varid, const PIO_Offset start[], const P ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. 
*/ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -377,7 +377,7 @@ int PIOc_put_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], cons ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -447,7 +447,7 @@ int PIOc_put_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_ ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -518,7 +518,7 @@ int PIOc_put_varm_float (int ncid, int varid, const PIO_Offset start[], const PI ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -588,7 +588,7 @@ int PIOc_put_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -660,7 +660,7 @@ int PIOc_put_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -731,7 +731,7 @@ int PIOc_put_varm_double (int ncid, int varid, const PIO_Offset start[], const P ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -801,7 +801,7 @@ int PIOc_put_varm_schar (int ncid, int varid, const PIO_Offset start[], const PI ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; @@ -872,7 +872,7 @@ int PIOc_put_varm_longlong (int ncid, int varid, const PIO_Offset start[], const ios = file->iosystem; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -941,7 +941,7 @@ int PIOc_get_varm_uchar (int ncid, int varid, const PIO_Offset start[], const PI ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -979,7 +979,7 @@ int PIOc_get_varm_uchar (int ncid, int varid, const PIO_Offset start[], const PI ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1010,7 +1010,7 @@ int PIOc_get_varm_schar (int ncid, int varid, const PIO_Offset start[], const PI ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. 
*/ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1048,7 +1048,7 @@ int PIOc_get_varm_schar (int ncid, int varid, const PIO_Offset start[], const PI ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1079,7 +1079,7 @@ int PIOc_get_varm_double (int ncid, int varid, const PIO_Offset start[], const P ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1117,7 +1117,7 @@ int PIOc_get_varm_double (int ncid, int varid, const PIO_Offset start[], const P ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1148,7 +1148,7 @@ int PIOc_get_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1186,7 +1186,7 @@ int PIOc_get_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1217,7 +1217,7 @@ int PIOc_get_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_ ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1255,7 +1255,7 @@ int PIOc_get_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_ ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1286,7 +1286,7 @@ int PIOc_get_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1324,7 +1324,7 @@ int PIOc_get_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1350,7 +1350,7 @@ int PIOc_get_varm (int ncid, int varid, const PIO_Offset start[], const PIO_Offs ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. 
*/ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1388,7 +1388,7 @@ int PIOc_get_varm (int ncid, int varid, const PIO_Offset start[], const PIO_Offs ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1419,7 +1419,7 @@ int PIOc_get_varm_float (int ncid, int varid, const PIO_Offset start[], const PI ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1457,7 +1457,7 @@ int PIOc_get_varm_float (int ncid, int varid, const PIO_Offset start[], const PI ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1488,7 +1488,7 @@ int PIOc_get_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1526,7 +1526,7 @@ int PIOc_get_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1557,7 +1557,7 @@ int PIOc_get_varm_ushort (int ncid, int varid, const PIO_Offset start[], const P ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1595,7 +1595,7 @@ int PIOc_get_varm_ushort (int ncid, int varid, const PIO_Offset start[], const P ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1626,7 +1626,7 @@ int PIOc_get_varm_longlong (int ncid, int varid, const PIO_Offset start[], const ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1664,7 +1664,7 @@ int PIOc_get_varm_longlong (int ncid, int varid, const PIO_Offset start[], const ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1695,7 +1695,7 @@ int PIOc_get_varm_short (int ncid, int varid, const PIO_Offset start[], const PI ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. 
*/ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1733,7 +1733,7 @@ int PIOc_get_varm_short (int ncid, int varid, const PIO_Offset start[], const PI ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } @@ -1764,7 +1764,7 @@ int PIOc_get_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], cons ierr = PIO_NOERR; /* Sorry, but varm functions are not supported by the async interface. */ - if (ios->async_interface) + if (ios->async) return PIO_EINVAL; if (ios->ioproc){ @@ -1802,7 +1802,7 @@ int PIOc_get_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], cons ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - if (ios->async_interface || bcast || + if (ios->async || bcast || (ios->num_iotasks < ios->num_comptasks)){ MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); } diff --git a/src/externals/pio2/src/clib/pioc.c b/src/externals/pio2/src/clib/pioc.c index 9612f65c8eed..0dc4fed89fd6 100644 --- a/src/externals/pio2/src/clib/pioc.c +++ b/src/externals/pio2/src/clib/pioc.c @@ -208,7 +208,7 @@ int PIOc_Set_IOSystem_Error_Handling(int iosysid, int method) /* Get the iosystem info. */ if (iosysid != PIO_DEFAULT) if (!(ios = pio_get_iosystem_from_id(iosysid))) - return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); + piodie("Could not find IO system.", __FILE__, __LINE__); /* Set the error handler. */ if (PIOc_set_iosystem_error_handling(iosysid, method, &oldmethod)) @@ -249,7 +249,7 @@ int PIOc_set_iosystem_error_handling(int iosysid, int method, int *old_method) /* If using async, and not an IO task, then send parameters. */ if (iosysid != PIO_DEFAULT) - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -290,11 +290,25 @@ int PIOc_set_iosystem_error_handling(int iosysid, int method, int *old_method) * decomposition describes how the data will be distributed between * tasks. * + * Internally, this function will: + *
    + *
  • Allocate and initialize an iodesc struct for this + * decomposition. (This also allocates an io_region struct for the + * first region.) + *
  • (Box rearranger only) If iostart or iocount are NULL, call + * CalcStartandCount() to determine starts/counts. Then call + * compute_maxIObuffersize() to compute the max IO buffer size needed. + *
  • Create the rearranger. + *
  • Assign an ioid and add this decomposition to the list of open + * decompositions. + *
+ * * @param iosysid the IO system ID. - * @param basetype the basic PIO data type used. + * @param pio_type the basic PIO data type used. * @param ndims the number of dimensions in the variable, not * including the unlimited dimension. - * @param dims an array of global size of each dimension. + * @param gdimlen an array length ndims with the sizes of the global + * dimensions. * @param maplen the local length of the compmap array. * @param compmap a 1 based array of offsets into the array record on * file. A 0 in this array indicates a value which should not be @@ -303,13 +317,17 @@ int PIOc_set_iosystem_error_handling(int iosysid, int method, int *old_method) * @param rearranger pointer to the rearranger to be used for this * decomp or NULL to use the default. * @param iostart An array of start values for block cyclic - * decompositions. If NULL ??? + * decompositions for the SUBSET rearranger. Ignored if block + * rearranger is used. If NULL and SUBSET rearranger is used, the + * iostarts are generated. * @param iocount An array of count values for block cyclic - * decompositions. If NULL ??? + * decompositions for the SUBSET rearranger. Ignored if block + * rearranger is used. If NULL and SUBSET rearranger is used, the + * iostarts are generated. * @returns 0 on success, error code otherwise * @ingroup PIO_initdecomp */ -int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int maplen, +int PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, int maplen, const PIO_Offset *compmap, int *ioidp, const int *rearranger, const PIO_Offset *iostart, const PIO_Offset *iocount) { @@ -318,24 +336,24 @@ int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int m int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function calls. */ int ierr; /* Return code. */ - LOG((1, "PIOc_InitDecomp iosysid = %d basetype = %d ndims = %d maplen = %d", - iosysid, basetype, ndims, maplen)); + LOG((1, "PIOc_InitDecomp iosysid = %d pio_type = %d ndims = %d maplen = %d", + iosysid, pio_type, ndims, maplen)); /* Get IO system info. */ if (!(ios = pio_get_iosystem_from_id(iosysid))) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); /* Caller must provide these. */ - if (!dims || !compmap || !ioidp) + if (!gdimlen || !compmap || !ioidp) return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); /* Check the dim lengths. */ for (int i = 0; i < ndims; i++) - if (dims[i] <= 0) + if (gdimlen[i] <= 0) return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. 
*/ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -350,11 +368,11 @@ int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int m if (!mpierr) mpierr = MPI_Bcast(&iosysid, 1, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) - mpierr = MPI_Bcast(&basetype, 1, MPI_INT, ios->compmaster, ios->intercomm); + mpierr = MPI_Bcast(&pio_type, 1, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) mpierr = MPI_Bcast(&ndims, 1, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) - mpierr = MPI_Bcast((int *)dims, ndims, MPI_INT, ios->compmaster, ios->intercomm); + mpierr = MPI_Bcast((int *)gdimlen, ndims, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) mpierr = MPI_Bcast(&maplen, 1, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) @@ -374,8 +392,8 @@ int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int m mpierr = MPI_Bcast(&iocount_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); if (iocount_present && !mpierr) mpierr = MPI_Bcast((PIO_Offset *)iocount, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm); - LOG((2, "PIOc_InitDecomp iosysid = %d basetype = %d ndims = %d maplen = %d rearranger_present = %d iostart_present = %d " - "iocount_present = %d ", iosysid, basetype, ndims, maplen, rearranger_present, iostart_present, iocount_present)); + LOG((2, "PIOc_InitDecomp iosysid = %d pio_type = %d ndims = %d maplen = %d rearranger_present = %d iostart_present = %d " + "iocount_present = %d ", iosysid, pio_type, ndims, maplen, rearranger_present, iostart_present, iocount_present)); } /* Handle MPI errors. */ @@ -385,8 +403,10 @@ int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int m return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); } - /* Allocate space for the iodesc info. */ - if ((ierr = malloc_iodesc(ios, basetype, ndims, &iodesc))) + /* Allocate space for the iodesc info. This also allocates the + * first region and copies the rearranger opts into this + * iodesc. */ + if ((ierr = malloc_iodesc(ios, pio_type, ndims, &iodesc))) return pio_err(ios, NULL, ierr, __FILE__, __LINE__); /* Remember the maplen. */ @@ -402,7 +422,7 @@ int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int m if (!(iodesc->dimlen = malloc(sizeof(int) * ndims))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); for (int d = 0; d < ndims; d++) - iodesc->dimlen[d] = dims[d]; + iodesc->dimlen[d] = gdimlen[d]; /* Set the rearranger. */ if (!rearranger) @@ -414,71 +434,76 @@ int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int m /* Is this the subset rearranger? */ if (iodesc->rearranger == PIO_REARR_SUBSET) { - LOG((2, "Handling subset rearranger.")); - if (iostart && iocount) - fprintf(stderr,"%s %s\n","Iostart and iocount arguments to PIOc_InitDecomp", - "are incompatable with subset rearrange method and will be ignored"); iodesc->num_aiotasks = ios->num_iotasks; - if ((ierr = subset_rearrange_create(ios, maplen, (PIO_Offset *)compmap, dims, + LOG((2, "creating subset rearranger iodesc->num_aiotasks = %d", + iodesc->num_aiotasks)); + if ((ierr = subset_rearrange_create(ios, maplen, (PIO_Offset *)compmap, gdimlen, ndims, iodesc))) - return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); } - else + else /* box rearranger */ { - LOG((2, "Handling not the subset rearranger.")); if (ios->ioproc) { /* Unless the user specifies the start and count for each * IO task compute it. 
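             * A minimal sketch of the idea (assumed, simplified to a block
             * split along the first dimension only):
             *
             *     blk = (gdimlen[0] + num_iotasks - 1) / num_iotasks;
             *     start[0] = io_rank * blk;
             *     count[0] = min(blk, gdimlen[0] - start[0]);
             *     start[d] = 0 and count[d] = gdimlen[d] for d > 0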
*/ if (iostart && iocount) { - iodesc->maxiobuflen = 1; + LOG((3, "iostart and iocount provided")); for (int i = 0; i < ndims; i++) { iodesc->firstregion->start[i] = iostart[i]; iodesc->firstregion->count[i] = iocount[i]; - compute_maxIObuffersize(ios->io_comm, iodesc); - } iodesc->num_aiotasks = ios->num_iotasks; } else { - iodesc->num_aiotasks = CalcStartandCount(basetype, ndims, dims, - ios->num_iotasks, ios->io_rank, - iodesc->firstregion->start, iodesc->firstregion->count); + /* Compute start and count values for each io task. */ + LOG((2, "about to call CalcStartandCount pio_type = %d ndims = %d", pio_type, ndims)); + if ((ierr = CalcStartandCount(pio_type, ndims, gdimlen, ios->num_iotasks, + ios->io_rank, iodesc->firstregion->start, + iodesc->firstregion->count, &iodesc->num_aiotasks))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); } - compute_maxIObuffersize(ios->io_comm, iodesc); + + /* Compute the max io buffer size needed for an iodesc. */ + if ((ierr = compute_maxIObuffersize(ios->io_comm, iodesc))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + LOG((3, "compute_maxIObuffersize called iodesc->maxiobuflen = %d", + iodesc->maxiobuflen)); } /* Depending on array size and io-blocksize the actual number * of io tasks used may vary. */ - if ((mpierr = MPI_Bcast(&(iodesc->num_aiotasks), 1, MPI_INT, ios->ioroot, ios->my_comm))) + if ((mpierr = MPI_Bcast(&(iodesc->num_aiotasks), 1, MPI_INT, ios->ioroot, + ios->my_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); LOG((3, "iodesc->num_aiotasks = %d", iodesc->num_aiotasks)); /* Compute the communications pattern for this decomposition. */ if (iodesc->rearranger == PIO_REARR_BOX) - ierr = box_rearrange_create(ios, maplen, compmap, dims, ndims, iodesc); - - /* - if (ios->ioproc){ - io_region *ioregion = iodesc->firstregion; - while(ioregion != NULL){ - for (int i=0;istart[i],ioregion->count[i]); - ioregion = ioregion->next; - } - } - */ + if ((ierr = box_rearrange_create(ios, maplen, compmap, gdimlen, ndims, iodesc))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); } /* Add this IO description to the list. */ *ioidp = pio_add_to_iodesc_list(iodesc); - LOG((3, "About to tune rearranger...")); +#if PIO_ENABLE_LOGGING + /* Log results. */ + LOG((2, "iodesc ioid = %d nrecvs = %d ndof = %d ndims = %d num_aiotasks = %d " + "rearranger = %d maxregions = %d needsfill = %d llen = %d maxiobuflen = %d", + iodesc->ioid, iodesc->nrecvs, iodesc->ndof, iodesc->ndims, iodesc->num_aiotasks, + iodesc->rearranger, iodesc->maxregions, iodesc->needsfill, iodesc->llen, + iodesc->maxiobuflen)); + for (int j = 0; j < iodesc->llen; j++) + LOG((3, "rindex[%d] = %lld", j, iodesc->rindex[j])); +#endif /* PIO_ENABLE_LOGGING */ + + /* This function only does something if pre-processor macro + * PERFTUNE is set. */ performance_tune_rearranger(ios, iodesc); - LOG((3, "Done with rearranger tune.")); return PIO_NOERR; } @@ -489,17 +514,19 @@ int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int m * tasks. * * @param iosysid the IO system ID. - * @param basetype the basic PIO data type used. + * @param pio_type the basic PIO data type used. * @param ndims the number of dimensions in the variable, not * including the unlimited dimension. - * @param dims an array of global size of each dimension. + * @param gdimlen an array length ndims with the sizes of the global + * dimensions. * @param maplen the local length of the compmap array. * @param compmap a 0 based array of offsets into the array record on * file. 
A -1 in this array indicates a value which should not be * transfered. * @param ioidp pointer that will get the io description ID. - * @param rearranger pointer to the rearranger to be used for this - * decomp or NULL to use the default. + * @param rearranger the rearranger to be used for this decomp or 0 to + * use the default. Valid rearrangers are PIO_REARR_BOX and + * PIO_REARR_SUBSET. * @param iostart An array of start values for block cyclic * decompositions. If NULL ??? * @param iocount An array of count values for block cyclic @@ -507,21 +534,30 @@ int PIOc_InitDecomp(int iosysid, int basetype, int ndims, const int *dims, int m * @returns 0 on success, error code otherwise * @ingroup PIO_initdecomp */ -int PIOc_init_decomp(int iosysid, int basetype, int ndims, const int *dims, int maplen, - const PIO_Offset *compmap, int *ioidp, const int *rearranger, +int PIOc_init_decomp(int iosysid, int pio_type, int ndims, const int *gdimlen, int maplen, + const PIO_Offset *compmap, int *ioidp, int rearranger, const PIO_Offset *iostart, const PIO_Offset *iocount) { PIO_Offset compmap_1_based[maplen]; + int *rearrangerp = NULL; + + LOG((1, "PIOc_init_decomp iosysid = %d pio_type = %d ndims = %d maplen = %d", + iosysid, pio_type, ndims, maplen)); - LOG((1, "PIOc_init_decomp iosysid = %d basetype = %d ndims = %d maplen = %d", - iosysid, basetype, ndims, maplen)); + /* If the user specified a non-default rearranger, use it. */ + if (rearranger) + rearrangerp = &rearranger; /* Add 1 to all elements in compmap. */ for (int e = 0; e < maplen; e++) + { + LOG((3, "zero-based compmap[%d] = %d", e, compmap[e])); compmap_1_based[e] = compmap[e] + 1; + } - return PIOc_InitDecomp(iosysid, basetype, ndims, dims, maplen, compmap_1_based, - ioidp, rearranger, iostart, iocount); + /* Call the legacy version of the function. */ + return PIOc_InitDecomp(iosysid, pio_type, ndims, gdimlen, maplen, compmap_1_based, + ioidp, rearrangerp, iostart, iocount); } /** @@ -530,16 +566,17 @@ int PIOc_init_decomp(int iosysid, int basetype, int ndims, const int *dims, int * the file. In this case we compute the compdof. * * @param iosysid the IO system ID - * @param basetype + * @param pio_type * @param ndims the number of dimensions - * @param dims array of dimensions + * @param gdimlen an array length ndims with the sizes of the global + * dimensions. * @param start start array * @param count count array * @param pointer that gets the IO ID. * @returns 0 for success, error code otherwise * @ingroup PIO_initdecomp */ -int PIOc_InitDecomp_bc(int iosysid, int basetype, int ndims, const int *dims, +int PIOc_InitDecomp_bc(int iosysid, int pio_type, int ndims, const int *gdimlen, const long int *start, const long int *count, int *ioidp) { @@ -548,20 +585,20 @@ int PIOc_InitDecomp_bc(int iosysid, int basetype, int ndims, const int *dims, PIO_Offset prod[ndims], loc[ndims]; int rearr = PIO_REARR_SUBSET; - LOG((1, "PIOc_InitDecomp_bc iosysid = %d basetype = %d ndims = %d")); + LOG((1, "PIOc_InitDecomp_bc iosysid = %d pio_type = %d ndims = %d")); /* Get the info about the io system. */ if (!(ios = pio_get_iosystem_from_id(iosysid))) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); /* Check for required inputs. */ - if (!dims || !start || !count || !ioidp) + if (!gdimlen || !start || !count || !ioidp) return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); /* Check that dim, start, and count values are not obviously * incorrect. 
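For illustration, a minimal caller-side sketch (not part of this patch) of the new zero-based PIOc_init_decomp() interface. The decomposition sizes are made up, and iosysid, my_rank, and ret are assumed to be defined by the caller.

    /* Sketch only: decompose a 4 x 4 global array of ints over 4
     * computation tasks, one row per task, using zero-based offsets. */
    int gdimlen[2] = {4, 4};        /* global dimension lengths */
    PIO_Offset compmap[4];          /* this task's 4 local elements */
    int ioid;

    for (int i = 0; i < 4; i++)
        compmap[i] = my_rank * 4 + i;   /* zero-based global offsets */

    /* Pass 0 for the rearranger to take the iosystem default; iostart and
     * iocount are NULL so PIO computes the IO decomposition itself. */
    if ((ret = PIOc_init_decomp(iosysid, PIO_INT, 2, gdimlen, 4, compmap,
                                &ioid, 0, NULL, NULL)))
        return ret;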
*/ for (int i = 0; i < ndims; i++) - if (dims[i] <= 0 || start[i] < 0 || count[i] < 0 || (start[i] + count[i]) > dims[i]) + if (gdimlen[i] <= 0 || start[i] < 0 || count[i] < 0 || (start[i] + count[i]) > gdimlen[i]) return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); /* Find the maplen. */ @@ -576,7 +613,7 @@ int PIOc_InitDecomp_bc(int iosysid, int basetype, int ndims, const int *dims, loc[ndims - 1] = 0; for (n = ndims - 2; n >= 0; n--) { - prod[n] = prod[n + 1] * dims[n + 1]; + prod[n] = prod[n + 1] * gdimlen[n + 1]; loc[n] = 0; } for (i = 0; i < maplen; i++) @@ -594,7 +631,7 @@ int PIOc_InitDecomp_bc(int iosysid, int basetype, int ndims, const int *dims, } } - return PIOc_InitDecomp(iosysid, basetype, ndims, dims, maplen, compmap, ioidp, + return PIOc_InitDecomp(iosysid, pio_type, ndims, gdimlen, maplen, compmap, ioidp, &rearr, NULL, NULL); } @@ -608,13 +645,37 @@ int PIOc_InitDecomp_bc(int iosysid, int basetype, int ndims, const int *dims, * The caller must create all comp_comm and the io_comm MPI * communicators before calling this function. * - * @param comp_comm the MPI_Comm of the compute tasks - * @param num_iotasks the number of io tasks to use - * @param stride the offset between io tasks in the comp_comm - * @param base the comp_comm index of the first io task + * Internally, this function does the following: + * + *
+ *
+ *  • Initializes the logging system (if PIO_ENABLE_LOGGING is set).
+ *  • Allocates and initializes the iosystem_desc_t struct (ios).
+ *  • MPI-duplicates the user's comp_comm to ios->comp_comm and
+ *    ios->union_comm.
+ *  • Sets ios->my_comm to ios->comp_comm. (Not an MPI duplication.)
+ *  • Finds the MPI rank in comp_comm, determines the ranks of the IO
+ *    tasks, and determines whether this task is one of the IO tasks.
+ *  • Identifies the root IO task.
+ *  • Creates MPI groups for the IO tasks and for the computation tasks.
+ *  • On IO tasks, creates an IO communicator (ios->io_comm).
+ *  • Assigns an iosysid and puts this iosystem_desc_t into the list of
+ *    open iosystems.
+ *  • Initializes the bget buffer, unless PIO_USE_MALLOC was used.
+ * + * When complete, there are three MPI communicators (ios->comp_comm, + * ios->union_comm, and ios->io_comm), and two MPI groups + * (ios->compgroup and ios->iogroup) that must be freed by MPI. + * + * @param comp_comm the MPI_Comm of the compute tasks. + * @param num_iotasks the number of io tasks to use. + * @param stride the offset between io tasks in the comp_comm. + * @param base the comp_comm index of the first io task. * @param rearr the rearranger to use by default, this may be - * overriden in the @ref PIO_initdecomp - * @param iosysidp index of the defined system descriptor + * overriden in the PIO_init_decomp(). The rearranger is not used + * until the decomposition is initialized. + * @param iosysidp index of the defined system descriptor. * @return 0 on success, otherwise a PIO error code. * @ingroup PIO_init */ @@ -652,9 +713,13 @@ int PIOc_Init_Intracomm(MPI_Comm comp_comm, int num_iotasks, int stride, int bas ios->num_iotasks = num_iotasks; ios->num_comptasks = num_comptasks; - /* Initialize the rearranger options. */ - init_rearr_opts(ios); + /* For non-async, the IO tasks are a subset of the comptasks. */ + ios->num_uniontasks = num_comptasks; + /* Initialize the rearranger options. */ + ios->rearr_opts.comm_type = PIO_REARR_COMM_COLL; + ios->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; + /* Copy the computation communicator into union_comm. */ if ((mpierr = MPI_Comm_dup(comp_comm, &ios->union_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); @@ -667,35 +732,42 @@ int PIOc_Init_Intracomm(MPI_Comm comp_comm, int num_iotasks, int stride, int bas ios->my_comm = ios->comp_comm; ustride = stride; - /* Find MPI rank comp_comm communicator. */ + /* Find MPI rank in comp_comm communicator. */ if ((mpierr = MPI_Comm_rank(ios->comp_comm, &ios->comp_rank))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + /* With non-async, all tasks are part of computation component. */ + ios->compproc = true; + + /* Create an array that holds the ranks of the tasks to be used + * for computation. */ + if (!(ios->compranks = calloc(ios->num_comptasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < ios->num_comptasks; i++) + ios->compranks[i] = i; + + /* Is this the comp master? */ if (ios->comp_rank == 0) ios->compmaster = MPI_ROOT; LOG((2, "comp_rank = %d num_comptasks = %d", ios->comp_rank, ios->num_comptasks)); /* Create an array that holds the ranks of the tasks to be used - * for IO. NOTE that sizeof(int) should probably be 1, not - * sizeof(int) ???*/ - if (!(ios->ioranks = calloc(sizeof(int), ios->num_iotasks))) + * for IO. */ + if (!(ios->ioranks = calloc(ios->num_iotasks, sizeof(int)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); for (int i = 0; i < ios->num_iotasks; i++) { ios->ioranks[i] = (base + i * ustride) % ios->num_comptasks; if (ios->ioranks[i] == ios->comp_rank) ios->ioproc = true; + LOG((3, "ios->ioranks[%d] = %d", i, ios->ioranks[i])); } ios->ioroot = ios->ioranks[0]; - for (int i = 0; i < ios->num_iotasks; i++) - LOG((3, "ios->ioranks[%d] = %d", i, ios->ioranks[i])); - /* We are not providing an info object. */ ios->info = MPI_INFO_NULL; - /* The task that has an iomaster value of MPI_ROOT will be the - * root of the IO communicator. */ + /* Identify the task that will be the root of the IO communicator. 
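As a point of reference, a hypothetical caller of the intracomm initialization might look like the sketch below (MPI is assumed to be initialized already; the 4-task, stride-4 layout is illustrative only).

    int iosysid;
    int ret;

    /* 4 IO tasks, one on every 4th rank starting at rank 0, with the box
     * rearranger as the default for later decompositions. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, 4, 4, 0, PIO_REARR_BOX,
                                   &iosysid)))
        return ret;

    /* ... create decompositions and files, read/write data ... */

    /* Free the three communicators and two groups described above. */
    if ((ret = PIOc_finalize(iosysid)))
        return ret;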
*/ if (ios->comp_rank == ios->ioranks[0]) ios->iomaster = MPI_ROOT; @@ -713,8 +785,8 @@ int PIOc_Init_Intracomm(MPI_Comm comp_comm, int num_iotasks, int stride, int bas return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); /* For the tasks that are doing IO, get their rank within the IO - * communicator. For some reason when I check the return value of - * this MPI call, all tests start to fail! */ + * communicator. If they are not doing IO, set their io_rank to + * -1. */ if (ios->ioproc) { if ((mpierr = MPI_Comm_rank(ios->io_comm, &ios->io_rank))) @@ -724,6 +796,7 @@ int PIOc_Init_Intracomm(MPI_Comm comp_comm, int num_iotasks, int stride, int bas ios->io_rank = -1; LOG((3, "ios->io_comm = %d ios->io_rank = %d", ios->io_comm, ios->io_rank)); + /* Rank in the union comm is the same as rank in the comp comm. */ ios->union_rank = ios->comp_rank; /* Add this ios struct to the list in the PIO library. */ @@ -770,12 +843,12 @@ int PIOc_Init_Intracomm_from_F90(int f90_comp_comm, LOG((1, "Setting rearranger options, iosys=%d", *iosysidp)); return PIOc_set_rearr_opts(*iosysidp, rearr_opts->comm_type, rearr_opts->fcd, - rearr_opts->comm_fc_opts_comp2io.enable_hs, - rearr_opts->comm_fc_opts_comp2io.enable_isend, - rearr_opts->comm_fc_opts_comp2io.max_pend_req, - rearr_opts->comm_fc_opts_io2comp.enable_hs, - rearr_opts->comm_fc_opts_io2comp.enable_isend, - rearr_opts->comm_fc_opts_io2comp.max_pend_req); + rearr_opts->comp2io.hs, + rearr_opts->comp2io.isend, + rearr_opts->comp2io.max_pend_req, + rearr_opts->io2comp.hs, + rearr_opts->io2comp.isend, + rearr_opts->io2comp.max_pend_req); } return ret; } @@ -841,7 +914,7 @@ int PIOc_finalize(int iosysid) * comp master to the IO processes. This may be called by * componets for other components iosysid. So don't send unless * there is a valid union_comm. */ - if (ios->async_interface && ios->union_comm != MPI_COMM_NULL) + if (ios->async && ios->union_comm != MPI_COMM_NULL) { int msg = PIO_MSG_EXIT; @@ -874,6 +947,9 @@ int PIOc_finalize(int iosysid) if (ios->ioranks) free(ios->ioranks); LOG((3, "Freed ioranks.")); + if (ios->compranks) + free(ios->compranks); + LOG((3, "Freed compranks.")); /* Free the buffer pool. */ int niosysid; @@ -1062,24 +1138,30 @@ int PIOc_iotype_available(int iotype) * duplicates and each must later be freed with MPI_Free() by the * caller.) * + * @param rearranger the default rearranger to use for decompositions + * in this IO system. Must be either PIO_REARR_BOX or + * PIO_REARR_SUBSET. + * * @param iosysidp pointer to array of length component_count that * gets the iosysid for each component. * * @return PIO_NOERR on success, error code otherwise. * @ingroup PIO_init */ -int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, +int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, int component_count, int *num_procs_per_comp, int **proc_list, - MPI_Comm *user_io_comm, MPI_Comm *user_comp_comm, int *iosysidp) + MPI_Comm *user_io_comm, MPI_Comm *user_comp_comm, int rearranger, + int *iosysidp) { - int my_rank; - int **my_proc_list; - int *my_io_proc_list; - int mpierr; - int ret; + int my_rank; /* Rank of this task. */ + int **my_proc_list; /* Array of arrays of procs for comp components. */ + int *my_io_proc_list; /* List of processors in IO component. */ + int mpierr; /* Return code from MPI functions. */ + int ret; /* Return code. */ /* Check input parameters. 
*/ - if (num_io_procs < 1 || component_count < 1 || !num_procs_per_comp || !iosysidp) + if (num_io_procs < 1 || component_count < 1 || !num_procs_per_comp || !iosysidp || + (rearranger != PIO_REARR_BOX && rearranger != PIO_REARR_SUBSET)) return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); /* Temporarily limit to one computational component. */ @@ -1088,16 +1170,21 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, /* Turn on the logging system for PIO. */ pio_init_logging(); - LOG((1, "PIOc_Init_Async component_count = %d", component_count)); + LOG((1, "PIOc_Init_Async num_io_procs = %d component_count = %d", num_io_procs, + component_count)); /* If the user did not supply a list of process numbers to use for * IO, create it. */ if (!io_proc_list) { + LOG((3, "calculating processors for IO component")); if (!(my_io_proc_list = malloc(num_io_procs * sizeof(int)))) return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); for (int p = 0; p < num_io_procs; p++) + { my_io_proc_list[p] = p; + LOG((3, "my_io_proc_list[%d] = %d", p, my_io_proc_list[p])); + } } else my_io_proc_list = io_proc_list; @@ -1106,16 +1193,16 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, * component, create one. */ if (!proc_list) { - int last_proc = 0; + int last_proc = num_io_procs; /* Allocate space for array of arrays. */ - if (!(my_proc_list = malloc((component_count + 1) * sizeof(int *)))) + if (!(my_proc_list = malloc((component_count) * sizeof(int *)))) return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); /* Fill the array of arrays. */ - for (int cmp = 0; cmp < component_count + 1; cmp++) + for (int cmp = 0; cmp < component_count; cmp++) { - LOG((3, "calculating processors for component %d", cmp)); + LOG((3, "calculating processors for component %d num_procs_per_comp[cmp] = %d", cmp, num_procs_per_comp[cmp])); /* Allocate space for each array. */ if (!(my_proc_list[cmp] = malloc(num_procs_per_comp[cmp] * sizeof(int)))) @@ -1133,20 +1220,19 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, else my_proc_list = proc_list; - /* Get rank of this task. */ + /* Get rank of this task in world. */ if ((ret = MPI_Comm_rank(world, &my_rank))) return check_mpi(NULL, ret, __FILE__, __LINE__); /* Is this process in the IO component? */ int pidx; - for (pidx = 0; pidx < num_procs_per_comp[0]; pidx++) - if (my_rank == my_proc_list[0][pidx]) + for (pidx = 0; pidx < num_io_procs; pidx++) + if (my_rank == my_io_proc_list[pidx]) break; - int in_io = (pidx == num_procs_per_comp[0]) ? 0 : 1; + int in_io = (pidx == num_io_procs) ? 0 : 1; LOG((3, "in_io = %d", in_io)); /* Allocate struct to hold io system info for each computation component. */ - /* Allocate struct to hold io system info for each component. */ iosystem_desc_t *iosys[component_count], *my_iosys; for (int cmp1 = 0; cmp1 < component_count; cmp1++) if (!(iosys[cmp1] = (iosystem_desc_t *)calloc(1, sizeof(iosystem_desc_t)))) @@ -1174,9 +1260,7 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, /* Create a group for the IO component. 
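To make the default layout concrete, here is a small illustrative sketch (not part of the patch) of the lists built above when num_io_procs = 2 and a single computation component has 4 tasks, assuming the computation ranks simply follow the IO ranks, as the comproot setting later in this patch suggests.

    int num_io_procs = 2;
    int num_procs_per_comp[1] = {4};
    int io_list[2];                      /* -> {0, 1} */
    int comp_list[4];                    /* -> {2, 3, 4, 5} */
    int last_proc = num_io_procs;        /* computation ranks start here */

    for (int p = 0; p < num_io_procs; p++)
        io_list[p] = p;
    for (int p = 0; p < num_procs_per_comp[0]; p++)
        comp_list[p] = last_proc + p;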
*/ if ((ret = MPI_Group_incl(world_group, num_io_procs, my_io_proc_list, &io_group))) return check_mpi(NULL, ret, __FILE__, __LINE__); - LOG((3, "created IO group - io_group = %d group empty is %d", io_group, MPI_GROUP_EMPTY)); - for (int p = 0; p < num_io_procs; p++) - LOG((3, "my_io_proc_list[%d] = %d", p, my_io_proc_list[p])); + LOG((3, "created IO group - io_group = %d MPI_GROUP_EMPTY = %d", io_group, MPI_GROUP_EMPTY)); /* There is one shared IO comm. Create it. */ if ((ret = MPI_Comm_create(world, io_group, &io_comm))) @@ -1204,45 +1288,47 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, io_comm, io_rank, iomaster == MPI_ROOT ? "MASTER" : "SERVANT")); } - /* We will create a group for each component. */ - MPI_Group group[component_count + 1]; + /* We will create a group for each computational component. */ + MPI_Group group[component_count]; /* We will also create a group for each component and the IO * component processes (i.e. a union of computation and IO * processes. */ MPI_Group union_group[component_count]; - /* For each component, starting with the IO component. */ - for (int cmp = 0; cmp < component_count + 1; cmp++) + /* For each computation component. */ + for (int cmp = 0; cmp < component_count; cmp++) { LOG((3, "processing component %d", cmp)); - /* Don't start initing iosys until after IO component. */ - if (cmp) - { - /* Get pointer to current iosys. */ - my_iosys = iosys[cmp - 1]; - - /* Initialize some values. */ - my_iosys->io_comm = MPI_COMM_NULL; - my_iosys->comp_comm = MPI_COMM_NULL; - my_iosys->union_comm = MPI_COMM_NULL; - my_iosys->intercomm = MPI_COMM_NULL; - my_iosys->my_comm = MPI_COMM_NULL; - my_iosys->async_interface = 1; - my_iosys->error_handler = default_error_handler; - my_iosys->num_comptasks = num_procs_per_comp[cmp]; - my_iosys->num_iotasks = num_procs_per_comp[0]; - my_iosys->compgroup = MPI_GROUP_NULL; - my_iosys->iogroup = MPI_GROUP_NULL; - - /* The rank of the computation leader in the union comm. */ - my_iosys->comproot = num_procs_per_comp[0]; - LOG((3, "my_iosys->comproot = %d", my_iosys->comproot)); - - /* We are not providing an info object. */ - my_iosys->info = MPI_INFO_NULL; - } + /* Get pointer to current iosys. */ + my_iosys = iosys[cmp]; + + /* Initialize some values. */ + my_iosys->io_comm = MPI_COMM_NULL; + my_iosys->comp_comm = MPI_COMM_NULL; + my_iosys->union_comm = MPI_COMM_NULL; + my_iosys->intercomm = MPI_COMM_NULL; + my_iosys->my_comm = MPI_COMM_NULL; + my_iosys->async = 1; + my_iosys->error_handler = default_error_handler; + my_iosys->num_comptasks = num_procs_per_comp[cmp]; + my_iosys->num_iotasks = num_io_procs; + my_iosys->num_uniontasks = my_iosys->num_comptasks + my_iosys->num_iotasks; + my_iosys->compgroup = MPI_GROUP_NULL; + my_iosys->iogroup = MPI_GROUP_NULL; + my_iosys->default_rearranger = rearranger; + + /* Initialize the rearranger options. */ + my_iosys->rearr_opts.comm_type = PIO_REARR_COMM_COLL; + my_iosys->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; + + /* The rank of the computation leader in the union comm. */ + my_iosys->comproot = num_io_procs; + LOG((3, "my_iosys->comproot = %d", my_iosys->comproot)); + + /* We are not providing an info object. */ + my_iosys->info = MPI_INFO_NULL; /* Create a group for this component. 
*/ if ((ret = MPI_Group_incl(world_group, num_procs_per_comp[cmp], my_proc_list[cmp], @@ -1250,39 +1336,47 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, return check_mpi(NULL, ret, __FILE__, __LINE__); LOG((3, "created component MPI group - group[%d] = %d", cmp, group[cmp])); - /* For all the computation components (i.e. cmp != 0), create - * a union group with their processors and the processors of - * the (shared) IO component. */ - if (cmp) - { - /* How many processors in the union comm? */ - int nprocs_union = num_procs_per_comp[0] + num_procs_per_comp[cmp]; + /* For all the computation components create a union group + * with their processors and the processors of the (shared) IO + * component. */ - /* This will hold proc numbers from both computation and IO - * components. */ - int proc_list_union[nprocs_union]; + /* How many processors in the union comm? */ + int nprocs_union = num_io_procs + num_procs_per_comp[cmp]; - /* Add proc numbers from IO. */ - for (int p = 0; p < num_procs_per_comp[0]; p++) - proc_list_union[p] = my_proc_list[0][p]; + /* This will hold proc numbers from both computation and IO + * components. */ + int proc_list_union[nprocs_union]; - /* Add proc numbers from computation component. */ - for (int p = 0; p < num_procs_per_comp[cmp]; p++) - proc_list_union[p + num_procs_per_comp[0]] = my_proc_list[cmp][p]; + /* Add proc numbers from IO. */ + for (int p = 0; p < num_io_procs; p++) + proc_list_union[p] = my_io_proc_list[p]; - /* Create the union group. */ - if ((ret = MPI_Group_incl(world_group, nprocs_union, proc_list_union, - &union_group[cmp - 1]))) - return check_mpi(NULL, ret, __FILE__, __LINE__); - LOG((3, "created union MPI_group - union_group[%d] = %d with %d procs", cmp, union_group[cmp-1], nprocs_union)); - } + /* Add proc numbers from computation component. */ + for (int p = 0; p < num_procs_per_comp[cmp]; p++) + proc_list_union[p + num_io_procs] = my_proc_list[cmp][p]; + + /* Allocate space for computation task ranks. */ + if (!(my_iosys->compranks = calloc(my_iosys->num_comptasks, sizeof(int)))) + return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); + + /* Remember computation task ranks. */ + for (int p = 0; p < num_procs_per_comp[cmp]; p++) + my_iosys->compranks[p] = my_proc_list[cmp][p]; + + /* Create the union group. */ + if ((ret = MPI_Group_incl(world_group, nprocs_union, proc_list_union, &union_group[cmp]))) + return check_mpi(NULL, ret, __FILE__, __LINE__); + LOG((3, "created union MPI_group - union_group[%d] = %d with %d procs", cmp, + union_group[cmp], nprocs_union)); /* Remember whether this process is in the IO component. */ - if (cmp) - my_iosys->ioproc = in_io; + my_iosys->ioproc = in_io; + + /* With async, tasks are either in a computation component or + * the IO component. */ + my_iosys->compproc = !in_io; - /* Is this process in this computation component (which is the - * IO component if cmp == 0)? */ + /* Is this process in this computation component? */ int in_cmp = 0; for (pidx = 0; pidx < num_procs_per_comp[cmp]; pidx++) if (my_rank == my_proc_list[cmp][pidx]) @@ -1294,113 +1388,97 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, /* Create an intracomm for this component. Only processes in * the component need to participate in the intracomm create * call. */ - /* Create the intracomm from the group. 
*/ LOG((3, "creating intracomm cmp = %d from group[%d] = %d", cmp, cmp, group[cmp])); + if ((ret = MPI_Comm_create(world, group[cmp], &my_iosys->comp_comm))) + return check_mpi(NULL, ret, __FILE__, __LINE__); - /* We handle the IO comm differently (cmp == 0). */ - if (!cmp) + if (in_cmp) { - /* LOG((3, "about to create io comm")); */ - /* if ((ret = MPI_Comm_create_group(world, group[cmp], cmp, &io_comm))) */ - /* return check_mpi(NULL, ret, __FILE__, __LINE__); */ - /* LOG((3, "about to get io rank")); */ - /* if ((ret = MPI_Comm_rank(io_comm, &io_rank))) */ - /* return check_mpi(NULL, ret, __FILE__, __LINE__); */ - /* iomaster = !io_rank ? MPI_ROOT : MPI_PROC_NULL; */ - /* LOG((3, "intracomm created for cmp = %d io_comm = %d io_rank = %d IO %s", */ - /* cmp, io_comm, io_rank, iomaster == MPI_ROOT ? "MASTER" : "SERVANT")); */ - } - else - { - if ((ret = MPI_Comm_create(world, group[cmp], &my_iosys->comp_comm))) - return check_mpi(NULL, ret, __FILE__, __LINE__); + /* Does the user want a copy? */ + if (user_comp_comm) + if ((mpierr = MPI_Comm_dup(my_iosys->comp_comm, &user_comp_comm[cmp]))) + return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if (in_cmp) - { - /* Does the user want a copy? */ - if (user_comp_comm) - if ((mpierr = MPI_Comm_dup(my_iosys->comp_comm, &user_comp_comm[cmp - 1]))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - - /* Get the rank in this comp comm. */ - if ((ret = MPI_Comm_rank(my_iosys->comp_comm, &my_iosys->comp_rank))) - return check_mpi(NULL, ret, __FILE__, __LINE__); + /* Get the rank in this comp comm. */ + if ((ret = MPI_Comm_rank(my_iosys->comp_comm, &my_iosys->comp_rank))) + return check_mpi(NULL, ret, __FILE__, __LINE__); - /* Set comp_rank 0 to be the compmaster. It will have - * a setting of MPI_ROOT, all other tasks will have a - * setting of MPI_PROC_NULL. */ - my_iosys->compmaster = my_iosys->comp_rank ? MPI_PROC_NULL : MPI_ROOT; + /* Set comp_rank 0 to be the compmaster. It will have a + * setting of MPI_ROOT, all other tasks will have a + * setting of MPI_PROC_NULL. */ + my_iosys->compmaster = my_iosys->comp_rank ? MPI_PROC_NULL : MPI_ROOT; - LOG((3, "intracomm created for cmp = %d comp_comm = %d comp_rank = %d comp %s", - cmp, my_iosys->comp_comm, my_iosys->comp_rank, - my_iosys->compmaster == MPI_ROOT ? "MASTER" : "SERVANT")); - } + LOG((3, "intracomm created for cmp = %d comp_comm = %d comp_rank = %d comp %s", + cmp, my_iosys->comp_comm, my_iosys->comp_rank, + my_iosys->compmaster == MPI_ROOT ? "MASTER" : "SERVANT")); } - /* If this is the IO component, make a copy of the IO comm for * each computational component. */ if (in_io) - if (cmp) - { - LOG((3, "making a dup of io_comm = %d io_rank = %d", io_comm, io_rank)); - if ((ret = MPI_Comm_dup(io_comm, &my_iosys->io_comm))) - return check_mpi(NULL, ret, __FILE__, __LINE__); - LOG((3, "dup of io_comm = %d io_rank = %d", my_iosys->io_comm, io_rank)); - my_iosys->iomaster = iomaster; - my_iosys->io_rank = io_rank; - my_iosys->ioroot = 0; - my_iosys->comp_idx = cmp - 1; - } + { + LOG((3, "making a dup of io_comm = %d io_rank = %d", io_comm, io_rank)); + if ((ret = MPI_Comm_dup(io_comm, &my_iosys->io_comm))) + return check_mpi(NULL, ret, __FILE__, __LINE__); + LOG((3, "dup of io_comm = %d io_rank = %d", my_iosys->io_comm, io_rank)); + my_iosys->iomaster = iomaster; + my_iosys->io_rank = io_rank; + my_iosys->ioroot = 0; + my_iosys->comp_idx = cmp; + } + + /* Create an array that holds the ranks of the tasks to be used + * for IO. 
*/ + if (!(my_iosys->ioranks = calloc(my_iosys->num_iotasks, sizeof(int)))) + return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < my_iosys->num_iotasks; i++) + my_iosys->ioranks[i] = my_io_proc_list[i]; + my_iosys->ioroot = my_iosys->ioranks[0]; /* All the processes in this component, and the IO component, * are part of the union_comm. */ - if (cmp) + if (in_io || in_cmp) { - if (in_io || in_cmp) - { - LOG((3, "my_iosys->io_comm = %d group = %d", my_iosys->io_comm, union_group[cmp-1])); - /* Create a group for the union of the IO component - * and one of the computation components. */ - if ((ret = MPI_Comm_create(world, union_group[cmp - 1], - &my_iosys->union_comm))) - return check_mpi(NULL, ret, __FILE__, __LINE__); + LOG((3, "my_iosys->io_comm = %d group = %d", my_iosys->io_comm, union_group[cmp])); + /* Create a group for the union of the IO component + * and one of the computation components. */ + if ((ret = MPI_Comm_create(world, union_group[cmp], &my_iosys->union_comm))) + return check_mpi(NULL, ret, __FILE__, __LINE__); - if ((ret = MPI_Comm_rank(my_iosys->union_comm, &my_iosys->union_rank))) - return check_mpi(NULL, ret, __FILE__, __LINE__); + if ((ret = MPI_Comm_rank(my_iosys->union_comm, &my_iosys->union_rank))) + return check_mpi(NULL, ret, __FILE__, __LINE__); - /* Set my_comm to union_comm for async. */ - my_iosys->my_comm = my_iosys->union_comm; - LOG((3, "intracomm created for union cmp = %d union_rank = %d union_comm = %d", - cmp, my_iosys->union_rank, my_iosys->union_comm)); + /* Set my_comm to union_comm for async. */ + my_iosys->my_comm = my_iosys->union_comm; + LOG((3, "intracomm created for union cmp = %d union_rank = %d union_comm = %d", + cmp, my_iosys->union_rank, my_iosys->union_comm)); - if (in_io) - { - LOG((3, "my_iosys->io_comm = %d", my_iosys->io_comm)); - /* Create the intercomm from IO to computation component. */ - LOG((3, "about to create intercomm for IO component to cmp = %d " - "my_iosys->io_comm = %d", cmp, my_iosys->io_comm)); - if ((ret = MPI_Intercomm_create(my_iosys->io_comm, 0, my_iosys->union_comm, - my_proc_list[cmp][0], 0, &my_iosys->intercomm))) - return check_mpi(NULL, ret, __FILE__, __LINE__); - } - else - { - /* Create the intercomm from computation component to IO component. */ - LOG((3, "about to create intercomm for cmp = %d my_iosys->comp_comm = %d", cmp, - my_iosys->comp_comm)); - if ((ret = MPI_Intercomm_create(my_iosys->comp_comm, 0, my_iosys->union_comm, - my_proc_list[0][0], 0, &my_iosys->intercomm))) - return check_mpi(NULL, ret, __FILE__, __LINE__); - } - LOG((3, "intercomm created for cmp = %d", cmp)); + if (in_io) + { + LOG((3, "my_iosys->io_comm = %d", my_iosys->io_comm)); + /* Create the intercomm from IO to computation component. */ + LOG((3, "about to create intercomm for IO component to cmp = %d " + "my_iosys->io_comm = %d", cmp, my_iosys->io_comm)); + if ((ret = MPI_Intercomm_create(my_iosys->io_comm, 0, my_iosys->union_comm, + my_proc_list[cmp][0], 0, &my_iosys->intercomm))) + return check_mpi(NULL, ret, __FILE__, __LINE__); } - - /* Add this id to the list of PIO iosystem ids. */ - iosysidp[cmp - 1] = pio_add_to_iosystem_list(my_iosys); - LOG((2, "new iosys ID added to iosystem_list iosysid = %d\n", iosysidp[cmp - 1])); + else + { + /* Create the intercomm from computation component to IO component. 
*/ + LOG((3, "about to create intercomm for cmp = %d my_iosys->comp_comm = %d", cmp, + my_iosys->comp_comm)); + if ((ret = MPI_Intercomm_create(my_iosys->comp_comm, 0, my_iosys->union_comm, + my_io_proc_list[0], 0, &my_iosys->intercomm))) + return check_mpi(NULL, ret, __FILE__, __LINE__); + } + LOG((3, "intercomm created for cmp = %d", cmp)); } - } + + /* Add this id to the list of PIO iosystem ids. */ + iosysidp[cmp] = pio_add_to_iosystem_list(my_iosys); + LOG((2, "new iosys ID added to iosystem_list iosysid = %d", iosysidp[cmp])); + } /* next computational component */ /* Now call the function from which the IO tasks will not return * until the PIO_MSG_EXIT message is sent. This will handle all @@ -1415,7 +1493,6 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, } /* Free resources if needed. */ - LOG((2, "PIOc_Init_Async starting to free resources")); if (!io_proc_list) free(my_io_proc_list); @@ -1425,7 +1502,7 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, if (!proc_list) { - for (int cmp = 0; cmp < component_count + 1; cmp++) + for (int cmp = 0; cmp < component_count; cmp++) free(my_proc_list[cmp]); free(my_proc_list); } @@ -1434,13 +1511,12 @@ int PIOc_Init_Async(MPI_Comm world, int num_io_procs, int *io_proc_list, if ((ret = MPI_Group_free(&io_group))) return check_mpi(NULL, ret, __FILE__, __LINE__); - for (int cmp = 0; cmp < component_count + 1; cmp++) + for (int cmp = 0; cmp < component_count; cmp++) { if ((ret = MPI_Group_free(&group[cmp]))) return check_mpi(NULL, ret, __FILE__, __LINE__); - if (cmp) - if ((ret = MPI_Group_free(&union_group[cmp - 1]))) - return check_mpi(NULL, ret, __FILE__, __LINE__); + if ((ret = MPI_Group_free(&union_group[cmp]))) + return check_mpi(NULL, ret, __FILE__, __LINE__); } if ((ret = MPI_Group_free(&world_group))) diff --git a/src/externals/pio2/src/clib/pioc_sc.c b/src/externals/pio2/src/clib/pioc_sc.c index 6b991b373dbd..98e3c6aa11a7 100644 --- a/src/externals/pio2/src/clib/pioc_sc.c +++ b/src/externals/pio2/src/clib/pioc_sc.c @@ -74,11 +74,11 @@ int gcd_array(int nain, int *ain) } /** - * Return the gcd of nain and any value in ain as int_64. + * Return the greatest common devisor of array ain as int_64. * - * @param main - * @param ain - * @returns + * @param nain number of elements in ain. + * @param ain array of length nain. + * @returns GCD of elements in ain. */ long long lgcd_array(int nain, long long *ain) { @@ -101,35 +101,50 @@ long long lgcd_array(int nain, long long *ain) } /** - * Compute start and count arrays. + * Compute one element (dimension) of start and count arrays. This + * function is used by CalcStartandCount(). * - * @param gdim - * @param ioprocs - * @param rank - * @param start - * @param kount + * @param gdim global size of one dimension. + * @param ioprocs number of io tasks. + * @param rank IO rank of this task. + * @param start pointer to PIO_Offset that will get the start value. + * @param count pointer to PIO_Offset that will get the count value. */ -void computestartandcount(int gdim, int ioprocs, int rank, PIO_Offset *start, - PIO_Offset *kount) +void compute_one_dim(int gdim, int ioprocs, int rank, PIO_Offset *start, + PIO_Offset *count) { - int irank; + int irank; /* The IO rank for this task. */ int remainder; int adds; - PIO_Offset lstart, lkount; + PIO_Offset lstart, lcount; + /* Check inputs. */ + pioassert(gdim >= 0 && ioprocs > 0 && rank >= 0 && start && count, + "invalid input", __FILE__, __LINE__); + + /* Determin which IO task to use. 
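The distribution rule is easier to see with numbers. The following standalone sketch (an illustrative re-implementation for this example, not the library routine itself) reproduces the logic for gdim = 10 over ioprocs = 3 and prints starts {0, 3, 6} and counts {3, 3, 4}.

    #include <stdio.h>

    static void one_dim(int gdim, int ioprocs, int rank, long *start, long *count)
    {
        long c = gdim / ioprocs;        /* base share for every IO task */
        long s = c * rank;              /* base start for this IO task */
        int rem = gdim - c * ioprocs;   /* leftover elements */

        if (rem >= ioprocs - rank)      /* the last 'rem' tasks get one extra */
        {
            c++;
            if (rank + rem - ioprocs > 0)
                s += rank + rem - ioprocs;
        }
        *start = s;
        *count = c;
    }

    int main(void)
    {
        long s, c;
        for (int r = 0; r < 3; r++)
        {
            one_dim(10, 3, r, &s, &c);
            printf("rank %d: start %ld count %ld\n", r, s, c);
        }
        return 0;
    }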
*/ irank = rank % ioprocs; - lkount = (long int)(gdim / ioprocs); - lstart = (long int)(lkount * irank); - remainder = gdim - lkount * ioprocs; - if (remainder >= ioprocs-irank) + /* Each IO task will have its share of the global dim. */ + lcount = (long int)(gdim / ioprocs); + + /* Find the start for this task. */ + lstart = (long int)(lcount * irank); + + /* Is there anything left over? */ + remainder = gdim - lcount * ioprocs; + + /* Distribute left over data to some IO tasks. */ + if (remainder >= ioprocs - irank) { - lkount++; + lcount++; if ((adds = irank + remainder - ioprocs) > 0) lstart += adds; } + + /* Return results to caller. */ *start = lstart; - *kount = lkount; + *count = lcount; } /** @@ -138,20 +153,29 @@ void computestartandcount(int gdim, int ioprocs, int rank, PIO_Offset *start, * * @param arrlen * @param arr_in - * @returns + * @returns the size of the block */ PIO_Offset GCDblocksize(int arrlen, const PIO_Offset *arr_in) { - int i, j, n, numblks, numtimes, ii, numgaps; - PIO_Offset bsize, bsizeg, blklensum; - PIO_Offset del_arr[arrlen - 1]; + int numblks = 0; /* Number of blocks. */ + int numtimes = 0; /* Number of times adjacent arr_in elements differ by != 1. */ + int numgaps = 0; /* Number of gaps. */ + int j; /* Loop counter. */ + int ii; /* Loop counter. */ + int n; + PIO_Offset bsize; /* Size of the block. */ + PIO_Offset bsizeg; /* Size of gap block. */ + PIO_Offset blklensum; /* Sum of all block lengths. */ + PIO_Offset del_arr[arrlen - 1]; /* Array of deltas between adjacent elements in arr_in. */ PIO_Offset loc_arr[arrlen - 1]; - numblks = 0; - numgaps = 0; - numtimes = 0; + /* Check inputs. */ + pioassert(arrlen > 0 && arr_in, "invalid input", __FILE__, __LINE__); - for (i = 0; i < arrlen - 1; i++) + /* Count the number of contiguous blocks in arr_in. If any if + these blocks is of size 1, we are done and can return. + Otherwise numtimes is the number of blocks. */ + for (int i = 0; i < arrlen - 1; i++) { del_arr[i] = arr_in[i + 1] - arr_in[i]; if (del_arr[i] != 1) @@ -162,76 +186,95 @@ PIO_Offset GCDblocksize(int arrlen, const PIO_Offset *arr_in) } } + /* If numtimes is 0 the all of the data in arr_in is contiguous + * and numblks=1. Not sure why I have three different variables + * here, seems like n,numblks and numtimes could be combined. */ numblks = numtimes + 1; if (numtimes == 0) n = numblks; else n = numtimes; + /* If numblks==1 then the result is arrlen and you can return. */ bsize = (PIO_Offset)arrlen; if (numblks > 1) { PIO_Offset blk_len[numblks]; PIO_Offset gaps[numtimes]; - if(numtimes > 0) + + /* If numblks > 1 then numtimes must be > 0 and this if block + * isn't needed. */ + if (numtimes > 0) { ii = 0; - for (i = 0; i < arrlen - 1; i++) + for (int i = 0; i < arrlen - 1; i++) if (del_arr[i] > 1) gaps[ii++] = del_arr[i] - 1; numgaps = ii; } j = 0; - for (i = 0; i < n; i++) + for (int i = 0; i < n; i++) loc_arr[i] = 1; - for (i = 0; i < arrlen - 1; i++) + for (int i = 0; i < arrlen - 1; i++) if(del_arr[i] != 1) loc_arr[j++] = i; blk_len[0] = loc_arr[0]; blklensum = blk_len[0]; - for(i = 1; i < numblks - 1; i++) + for(int i = 1; i < numblks - 1; i++) { blk_len[i] = loc_arr[i] - loc_arr[i - 1]; blklensum += blk_len[i]; } blk_len[numblks - 1] = arrlen - blklensum; + /* Get the GCD in blk_len array. */ bsize = lgcd_array(numblks, blk_len); + + /* I don't recall why i needed these next two blocks, I + * remember struggling to get this right in all cases and I'm + * afraid that the end result is that bsize is almost always + * 1. 
*/ if (numgaps > 0) { bsizeg = lgcd_array(numgaps, gaps); bsize = lgcd(bsize, bsizeg); } - if(arr_in[0] > 0) + + /* ??? */ + if (arr_in[0] > 0) bsize = lgcd(bsize, arr_in[0]); } + return bsize; } /** - * Compute start and count values for each io task. + * Compute start and count values for each io task. This is used in + * PIOc_InitDecomp() for the box rearranger only. * - * @param basetype - * @param ndims - * @param gdims - * @param num_io_procs - * @param myiorank - * @param start - * @param kount + * @param pio_type the PIO data type used in this decompotion. + * @param ndims the number of dimensions in the variable, not + * including the unlimited dimension. + * @param gdims an array of global size of each dimension. + * @param num_io_procs the number of IO tasks. + * @param myiorank rank of this task in IO communicator. + * @param start array of length ndims with data start values. + * @param count array of length ndims with data count values. + * @param num_aiotasks the number of IO tasks used(?) + * @returns 0 for success, error code otherwise. */ -int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_procs, - int myiorank, PIO_Offset *start, PIO_Offset *kount) +int CalcStartandCount(int pio_type, int ndims, const int *gdims, int num_io_procs, + int myiorank, PIO_Offset *start, PIO_Offset *count, int *num_aiotasks) { - int minbytes; + int minbytes; int maxbytes; - int minblocksize; - int basesize; + int minblocksize; /* Like minbytes, but in data elements. */ + int basesize; /* Size in bytes of base data type. */ int use_io_procs; int i; - long int p; long int pgdims; bool converged; int iorank; @@ -239,50 +282,61 @@ int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_proc int tiorank; int ioprocs; int tioprocs; - int mystart[ndims], mykount[ndims]; + int mystart[ndims], mycount[ndims]; long int pknt; - long int tpsize=0; - + long int tpsize = 0; + int ret; + + /* Check inputs. */ + pioassert(pio_type > 0 && ndims > 0 && gdims && num_io_procs > 0 && start && count, + "invalid input", __FILE__, __LINE__); + LOG((1, "CalcStartandCount pio_type = %d ndims = %d num_io_procs = %d myiorank = %d", + pio_type, ndims, num_io_procs, myiorank)); + + /* We are trying to find start and count indices for each iotask + * such that each task has approximately blocksize data to write + * (read). The number of iotasks participating in the operation is + * blocksize/global_size. */ minbytes = blocksize - 256; - maxbytes = blocksize + 256; + maxbytes = blocksize + 256; - switch (basetype) - { - case PIO_INT: - basesize = sizeof(int); - break; - case PIO_REAL: - basesize = sizeof(float); - break; - case PIO_DOUBLE: - basesize = sizeof(double); - break; - default: - piodie("Invalid basetype ",__FILE__,__LINE__); - break; - } + /* Determine the size of the data type. */ + if ((ret = find_mpi_type(pio_type, NULL, &basesize))) + return ret; + + /* Determine the minimum block size. */ minblocksize = minbytes / basesize; + /* Find the total size of the data. */ pgdims = 1; for (i = 0; i < ndims; i++) pgdims *= (long int)gdims[i]; - p = pgdims; - use_io_procs = max(1, min((int)((float)p / (float)minblocksize + 0.5), num_io_procs)); + + /* Find the number of ioprocs that are needed so that we have + * blocksize data on each iotask*/ + use_io_procs = max(1, min((int)((float)pgdims / (float)minblocksize + 0.5), num_io_procs)); + + /* Initialize to 0. 
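A back-of-envelope example of the task-count calculation above; the 1024-byte block size and the 60 x 60 integer array are assumptions for illustration only.

    /*
     * basesize     = 4 bytes                  (PIO_INT)
     * minbytes     = 1024 - 256 = 768 bytes
     * minblocksize = 768 / 4    = 192 elements
     * pgdims       = 60 * 60    = 3600 elements
     * use_io_procs = max(1, min((int)(3600.0f / 192.0f + 0.5), num_io_procs))
     *              = min(19, num_io_procs)
     *
     * so at most 19 IO tasks take part in reads and writes for this
     * decomposition, however many IO tasks the iosystem has.
     */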
*/ converged = 0; for (i = 0; i < ndims; i++) { mystart[i] = 0; - mykount[i] = 0; + mycount[i] = 0; } + /* Use_io_procs is the number of ioprocs that are needed so that + * we have blocksize data on each iotask, now find start and count + * values needed on each of these tasks. */ while (!converged) { + long int p; + for (iorank = 0; iorank < use_io_procs; iorank++) { for (i = 0; i < ndims; i++) { start[i] = 0; - kount[i] = gdims[i]; + count[i] = gdims[i]; } ldims = ndims - 1; p = basesize; @@ -298,7 +352,7 @@ int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_proc if (gdims[ldims] < use_io_procs) { - if (ldims > 0 && gdims[ldims-1] > use_io_procs) + if (ldims > 0 && gdims[ldims - 1] > use_io_procs) ldims--; else use_io_procs -= (use_io_procs % gdims[ldims]); @@ -310,8 +364,8 @@ int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_proc { if (gdims[i] >= ioprocs) { - computestartandcount(gdims[i], ioprocs, tiorank, start + i, kount + i); - if (start[i] + kount[i] > gdims[i] + 1) + compute_one_dim(gdims[i], ioprocs, tiorank, &start[i], &count[i]); + if (start[i] + count[i] > gdims[i] + 1) { piodie("Start plus count exceeds dimension bound",__FILE__,__LINE__); } @@ -320,7 +374,7 @@ int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_proc { tioprocs = gdims[i]; tiorank = (iorank * tioprocs) / ioprocs; - computestartandcount(gdims[i], tioprocs, tiorank, start + i, kount + i); + compute_one_dim(gdims[i], tioprocs, tiorank, &start[i], &count[i]); ioprocs = ioprocs / tioprocs; tiorank = iorank % ioprocs; } @@ -332,13 +386,13 @@ int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_proc for (i = 0; i < ndims; i++) { mystart[i] = start[i]; - mykount[i] = kount[i]; + mycount[i] = count[i]; } } pknt = 1; for(i = 0; i < ndims; i++) - pknt *= kount[i]; + pknt *= count[i]; tpsize += pknt; @@ -360,12 +414,14 @@ int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_proc } } + /* On IO tasks, set the start/count arrays to computed values. On + * non-io tasks set start/count to zero. */ if (myiorank < use_io_procs) { for (i = 0; i < ndims; i++) { start[i] = mystart[i]; - kount[i] = mykount[i]; + count[i] = mycount[i]; } } else @@ -373,9 +429,12 @@ int CalcStartandCount(int basetype, int ndims, const int *gdims, int num_io_proc for (i = 0; i < ndims; i++) { start[i] = 0; - kount[i] = 0; + count[i] = 0; } } - return use_io_procs; + /* Return the number of IO procs used to the caller. */ + *num_aiotasks = use_io_procs; + + return PIO_NOERR; } diff --git a/src/externals/pio2/src/clib/pioc_support.c b/src/externals/pio2/src/clib/pioc_support.c index 8f45277a32fd..fba1ad9070ac 100644 --- a/src/externals/pio2/src/clib/pioc_support.c +++ b/src/externals/pio2/src/clib/pioc_support.c @@ -101,13 +101,14 @@ int PIOc_strerror(int pioerr, char *errmsg) */ int PIOc_set_log_level(int level) { - int ret; #if PIO_ENABLE_LOGGING /* Set the log level. */ pio_log_level = level; #if NETCDF_C_LOGGING_ENABLED + int ret; + /* If netcdf logging is available turn it on starting at level = 4. */ if (level > NC_LEVEL_DIFF) if ((ret = nc_set_log_level(level - NC_LEVEL_DIFF))) @@ -148,7 +149,7 @@ void pio_init_logging(void) /** * Finalize logging - close log files, if open. */ -void pio_finalize_logging(void ) +void pio_finalize_logging(void) { #if PIO_ENABLE_LOGGING pio_log_ref_cnt -= 1; @@ -288,22 +289,6 @@ void print_trace(FILE *fp) free(strings); } -/** - * Exit due to lack of memory. 
- * - * @param ios the iosystem description struct - * @param req amount of memory that was being requested - * @param fname name of code file where error occured - * @param line the line of code where the error occurred. - */ -void piomemerror(iosystem_desc_t *ios, size_t req, char *fname, int line) -{ - char msg[80]; - sprintf(msg, "out of memory requesting: %ld", req); - cn_buffer_report(ios, false); - piodie(msg, fname, line); -} - /** * Abort program and call MPI_Abort(). * @@ -532,145 +517,173 @@ int pio_err(iosystem_desc_t *ios, file_desc_t *file, int err_num, const char *fn } /** - * Allocate an region. + * Allocate a region struct, and initialize it. * - * ndims the number of dimensions for the data in this region. - * @returns a pointer to the newly allocated io_region struct. + * @param ios pointer to the IO system info, used for error + * handling. Ignored if NULL. + * @param ndims the number of dimensions for the data in this region. + * @param a pointer that gets a pointer to the newly allocated + * io_region struct. + * @returns 0 for success, error code otherwise. */ -io_region *alloc_region(int ndims) +int alloc_region2(iosystem_desc_t *ios, int ndims, io_region **regionp) { io_region *region; + /* Check inputs. */ + pioassert(ndims >= 0 && regionp, "invalid input", __FILE__, __LINE__); + LOG((1, "alloc_region2 ndims = %d sizeof(io_region) = %d", ndims, + sizeof(io_region))); + /* Allocate memory for the io_region struct. */ - if (!(region = bget(sizeof(io_region)))) - return NULL; + if (!(region = calloc(1, sizeof(io_region)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); /* Allocate memory for the array of start indicies. */ - if (!(region->start = bget(ndims * sizeof(PIO_Offset)))) - { - brel(region); - return NULL; - } + if (!(region->start = calloc(ndims, sizeof(PIO_Offset)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); /* Allocate memory for the array of counts. */ - if (!(region->count = bget(ndims * sizeof(PIO_Offset)))) - { - brel(region); - brel(region->start); - return NULL; - } - - region->loffset = 0; - region->next = NULL; - - /* Initialize start and count arrays to zero. */ - for (int i = 0; i < ndims; i++) - { - region->start[i] = 0; - region->count[i] = 0; - } + if (!(region->count = calloc(ndims, sizeof(PIO_Offset)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - return region; + /* Return pointer to new region to caller. */ + *regionp = region; + + return PIO_NOERR; } /** - * Find the MPI type for a PIO type. + * Given a PIO type, find the MPI type and the type size. * * @param pio_type a PIO type, PIO_INT, PIO_FLOAT, etc. * @param mpi_type a pointer to MPI_Datatype that will get the MPI - * type that coresponds to the PIO type. + * type that coresponds to the PIO type. Ignored if NULL. + * @param type_size a pointer to int that will get the size of the + * type, in bytes. (For example, 4 for PIO_INT). Ignored if NULL. * @returns 0 for success, error code otherwise. */ -int find_mpi_type(int pio_type, MPI_Datatype *mpi_type) +int find_mpi_type(int pio_type, MPI_Datatype *mpi_type, int *type_size) { - /* Check input. */ - pioassert(mpi_type, "invalid input", __FILE__, __LINE__); + MPI_Datatype my_mpi_type; + int my_type_size; /* Decide on the base type. 
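For example, a caller of the extended lookup might use it as in the hedged sketch below; ret is assumed to be declared, and either output pointer may be NULL when that value is not needed.

    MPI_Datatype mtype;
    int tsize;

    /* Both outputs requested: mtype becomes MPI_DOUBLE, tsize becomes 8. */
    if ((ret = find_mpi_type(PIO_DOUBLE, &mtype, &tsize)))
        return ret;

    /* Only the element size is wanted here (4 bytes for PIO_INT). */
    if ((ret = find_mpi_type(PIO_INT, NULL, &tsize)))
        return ret;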
*/ switch(pio_type) { case PIO_BYTE: - *mpi_type = MPI_BYTE; + my_mpi_type = MPI_BYTE; + my_type_size = NETCDF_CHAR_SIZE; break; case PIO_CHAR: - *mpi_type = MPI_CHAR; + my_mpi_type = MPI_CHAR; + my_type_size = NETCDF_CHAR_SIZE; break; case PIO_SHORT: - *mpi_type = MPI_SHORT; + my_mpi_type = MPI_SHORT; + my_type_size = NETCDF_SHORT_SIZE; break; case PIO_INT: - *mpi_type = MPI_INT; + my_mpi_type = MPI_INT; + my_type_size = NETCDF_INT_FLOAT_SIZE; break; case PIO_FLOAT: - *mpi_type = MPI_FLOAT; + my_mpi_type = MPI_FLOAT; + my_type_size = NETCDF_INT_FLOAT_SIZE; break; case PIO_DOUBLE: - *mpi_type = MPI_DOUBLE; + my_mpi_type = MPI_DOUBLE; + my_type_size = NETCDF_DOUBLE_INT64_SIZE; break; #ifdef _NETCDF4 case PIO_UBYTE: - *mpi_type = MPI_UNSIGNED_CHAR; + my_mpi_type = MPI_UNSIGNED_CHAR; + my_type_size = NETCDF_CHAR_SIZE; break; case PIO_USHORT: - *mpi_type = MPI_UNSIGNED_SHORT; + my_mpi_type = MPI_UNSIGNED_SHORT; + my_type_size = NETCDF_SHORT_SIZE; break; case PIO_UINT: - *mpi_type = MPI_UNSIGNED; + my_mpi_type = MPI_UNSIGNED; + my_type_size = NETCDF_INT_FLOAT_SIZE; break; case PIO_INT64: - *mpi_type = MPI_LONG_LONG; + my_mpi_type = MPI_LONG_LONG; + my_type_size = NETCDF_DOUBLE_INT64_SIZE; break; case PIO_UINT64: - *mpi_type = MPI_UNSIGNED_LONG_LONG; + my_mpi_type = MPI_UNSIGNED_LONG_LONG; + my_type_size = NETCDF_DOUBLE_INT64_SIZE; break; case PIO_STRING: - *mpi_type = MPI_CHAR; + my_mpi_type = MPI_CHAR; + my_type_size = NETCDF_CHAR_SIZE; break; #endif /* _NETCDF4 */ default: return PIO_EBADTYPE; } + /* If caller wants MPI type, set it. */ + if (mpi_type) + *mpi_type = my_mpi_type; + + /* If caller wants type size, set it. */ + if (type_size) + *type_size = my_type_size; + return PIO_NOERR; } /** - * Allocate space for an IO description struct. + * Allocate space for an IO description struct, and initialize it. * - * @param ios pointer to the IO system info. + * @param ios pointer to the IO system info, used for error + * handling. * @param piotype the PIO data type (ex. PIO_FLOAT, PIO_INT, etc.). * @param ndims the number of dimensions. - * @iodesc pointer that gets a pointer to the newly allocated - * io_desc_t or NULL if allocation failed. + * @param iodesc pointer that gets the newly allocated io_desc_t. * @returns 0 for success, error code otherwise. */ int malloc_iodesc(iosystem_desc_t *ios, int piotype, int ndims, io_desc_t **iodesc) { MPI_Datatype mpi_type; + int mpierr; int ret; /* Check input. */ - pioassert(ios && iodesc, "invalid input", __FILE__, __LINE__); + pioassert(ios && piotype > 0 && ndims >= 0 && iodesc, + "invalid input", __FILE__, __LINE__); - /* Allocate space for the io_desc_t struct. */ - if (!(*iodesc = calloc(1, sizeof(io_desc_t)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + LOG((1, "malloc_iodesc piotype = %d ndims = %d", piotype, ndims)); /* Get the MPI type corresponding with the PIO type. */ - if ((ret = find_mpi_type(piotype, &mpi_type))) + if ((ret = find_mpi_type(piotype, &mpi_type, NULL))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); - /* Decide on the base type. */ + /* Allocate space for the io_desc_t struct. */ + if (!(*iodesc = calloc(1, sizeof(io_desc_t)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + + /* Remember the MPI type. */ (*iodesc)->basetype = mpi_type; + /* Get the size of the type. */ + if ((mpierr = MPI_Type_size((*iodesc)->basetype, &(*iodesc)->basetype_size))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + /* Initialize some values in the struct. 
*/ (*iodesc)->maxregions = 1; (*iodesc)->ioid = -1; (*iodesc)->ndims = ndims; - (*iodesc)->firstregion = alloc_region(ndims); - /* Set the swap memory settings to defaults. */ + /* Allocate space for, and initialize, the first region. */ + if ((ret = alloc_region2(ios, ndims, &((*iodesc)->firstregion)))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + + /* Set the swap memory settings to defaults for this IO system. */ (*iodesc)->rearr_opts = ios->rearr_opts; return PIO_NOERR; @@ -689,12 +702,12 @@ void free_region_list(io_region *top) while (ptr) { if (ptr->start) - brel(ptr->start); + free(ptr->start); if (ptr->count) - brel(ptr->count); + free(ptr->count); tptr = ptr; ptr = ptr->next; - brel(tptr); + free(tptr); } } @@ -718,7 +731,7 @@ int PIOc_freedecomp(int iosysid, int ioid) return pio_err(ios, NULL, PIO_EBADID, __FILE__, __LINE__); /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -754,7 +767,7 @@ int PIOc_freedecomp(int iosysid, int ioid) { for (int i = 0; i < iodesc->nrecvs; i++) if (iodesc->rtype[i] != PIO_DATATYPE_NULL) - if ((mpierr = MPI_Type_free(iodesc->rtype + i))) + if ((mpierr = MPI_Type_free(&iodesc->rtype[i]))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); free(iodesc->rtype); @@ -938,7 +951,6 @@ int PIOc_readmap_from_f90(const char *file, int *ndims, int **gdims, PIO_Offset * @param filename the filename to be used. * @param cmode for PIOc_create(). Will be bitwise or'd with NC_WRITE. * @param ioid the ID of the IO description. - * @param comm an MPI communicator. * @param title optial title attribute for the file. Must be less than * NC_MAX_NAME + 1 if provided. Ignored if NULL. * @param history optial history attribute for the file. Must be less @@ -948,12 +960,11 @@ int PIOc_readmap_from_f90(const char *file, int *ndims, int **gdims, PIO_Offset * @returns 0 for success, error code otherwise. */ int PIOc_write_nc_decomp(int iosysid, const char *filename, int cmode, int ioid, - MPI_Comm comm, char *title, char *history, int fortran_order) + char *title, char *history, int fortran_order) { - iosystem_desc_t *ios; - io_desc_t *iodesc; - int npes; /* Size of this communicator. */ - int myrank; /* Rank of this task. */ + iosystem_desc_t *ios; /* IO system info. */ + io_desc_t *iodesc; /* Decomposition info. */ + int max_maplen; /* The maximum maplen used for any task. */ int mpierr; int ret; @@ -963,42 +974,41 @@ int PIOc_write_nc_decomp(int iosysid, const char *filename, int cmode, int ioid, /* Check inputs. */ if (!filename) - return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); + return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); + if (title) + if (strlen(title) > PIO_MAX_NAME) + return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); + if (history) + if (strlen(history) > PIO_MAX_NAME) + return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); - LOG((1, "PIOc_write_nc_decomp filename = %s iosysid = %d ioid = %d", filename, - iosysid, ioid)); + LOG((1, "PIOc_write_nc_decomp filename = %s iosysid = %d ioid = %d " + "ios->num_comptasks = %d", filename, iosysid, ioid, ios->num_comptasks)); /* Get the IO desc, which describes the decomposition. */ if (!(iodesc = pio_get_iodesc_from_id(ioid))) return pio_err(ios, NULL, PIO_EBADID, __FILE__, __LINE__); - /* Get the communicator size and task rank. 
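A hypothetical caller-side sketch of the revised signature, which no longer takes an MPI communicator (the iosystem's computation communicator is used instead); the filename is illustrative, and iosysid, ioid, and ret are assumed to exist in the caller.

    /* NULL title and history skip the optional attributes; 0 for
     * fortran_order records C array ordering. */
    if ((ret = PIOc_write_nc_decomp(iosysid, "decomp.nc", 0, ioid,
                                    NULL, NULL, 0)))
        return ret;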
*/ - if ((mpierr = MPI_Comm_size(comm, &npes))) - return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Comm_rank(comm, &myrank))) - return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - LOG((2, "npes = %d myrank = %d", npes, myrank)); + /* Allocate memory for array which will contain the length of the + * map on each task, for all computation tasks. */ + int task_maplen[ios->num_comptasks]; + LOG((3, "ios->num_comptasks = %d", ios->num_comptasks)); - /* Allocate memory for the nmaplen. On task 0, this will contain - * the length of the map on each task, for all tasks. */ - int task_maplen[npes]; - - /* Gather maplens from all tasks and fill the task_maplen array on - * all tasks. */ - if ((mpierr = MPI_Allgather(&iodesc->maplen, 1, MPI_INT, task_maplen, 1, MPI_INT, comm))) + /* Gather maplens from all computation tasks and fill the + * task_maplen array on all tasks. */ + if ((mpierr = MPI_Allgather(&iodesc->maplen, 1, MPI_INT, task_maplen, 1, MPI_INT, + ios->comp_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - /* We will need to know the maximum maplen used for any task. */ - int max_maplen; - /* Find the max maxplen. */ - if ((mpierr = MPI_Allreduce(&iodesc->maplen, &max_maplen, 1, MPI_INT, MPI_MAX, comm))) + if ((mpierr = MPI_Allreduce(&iodesc->maplen, &max_maplen, 1, MPI_INT, MPI_MAX, + ios->comp_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); LOG((3, "max_maplen = %d", max_maplen)); - /* 2D array that, on task 0, will hold all the map information for - * all tasks. */ - int full_map[npes][max_maplen]; + /* 2D array that will hold all the map information for all + * tasks. */ + int full_map[ios->num_comptasks][max_maplen]; /* Fill local array with my map. Use the fill value for unused */ /* elements at the end if max_maplen is longer than maplen. Also @@ -1009,22 +1019,22 @@ int PIOc_write_nc_decomp(int iosysid, const char *filename, int cmode, int ioid, my_map[e] = e < iodesc->maplen ? iodesc->map[e] - 1 : NC_FILL_INT; LOG((3, "my_map[%d] = %d", e, my_map[e])); } - - /* Gather my_map from all tasks and fill the full_map array. */ + + /* Gather my_map from all computation tasks and fill the full_map array. */ if ((mpierr = MPI_Allgather(&my_map, max_maplen, MPI_INT, full_map, max_maplen, - MPI_INT, comm))) + MPI_INT, ios->comp_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - - for (int p = 0; p < npes; p++) + + for (int p = 0; p < ios->num_comptasks; p++) for (int e = 0; e < max_maplen; e++) LOG((3, "full_map[%d][%d] = %d", p, e, full_map[p][e])); /* Write the netCDF decomp file. */ - if ((ret = pioc_write_nc_decomp_int(iosysid, filename, cmode, iodesc->ndims, iodesc->dimlen, npes, - task_maplen, (int *)full_map, title, history, fortran_order))) + if ((ret = pioc_write_nc_decomp_int(ios, filename, cmode, iodesc->ndims, iodesc->dimlen, + ios->num_comptasks, task_maplen, (int *)full_map, title, + history, fortran_order))) return ret; - return PIO_NOERR; } @@ -1060,8 +1070,6 @@ int PIOc_read_nc_decomp(int iosysid, const char *filename, int *ioidp, MPI_Comm int num_tasks_decomp; /* The number of tasks for this decomp. */ int size; /* Size of comm. */ int my_rank; /* Task rank in comm. */ - MPI_Datatype mpi_type; /* Will be used as the basetype in iodesc. */ - int mpi_type_int; /* int version of mpi_type. */ char source_in[PIO_MAX_NAME + 1]; /* Text metadata in decomp file. */ char version_in[PIO_MAX_NAME + 1]; /* Text metadata in decomp file. 
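To illustrate the gathered map (the values are made up): with two computation tasks whose one-based maps are {1, 2, 3} and {4, 5}, the arrays built above come out as follows.

    /*
     * task_maplen = {3, 2}        max_maplen = 3
     *
     * full_map[0] = {0, 1, 2}                 (zero-based)
     * full_map[1] = {3, 4, NC_FILL_INT}       (padded to max_maplen)
     */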
*/ int mpierr; @@ -1078,12 +1086,6 @@ int PIOc_read_nc_decomp(int iosysid, const char *filename, int *ioidp, MPI_Comm LOG((1, "PIOc_read_nc_decomp filename = %s iosysid = %d pio_type = %d", filename, iosysid, pio_type)); - /* Get the MPI type. We need it as an int. */ - if ((ret = find_mpi_type(pio_type, &mpi_type))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - mpi_type_int = mpi_type; - LOG((2, "mpi_type = %d mpi_type_int = %d", mpi_type, mpi_type_int)); - /* Get the communicator size and task rank. */ if ((mpierr = MPI_Comm_size(comm, &size))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); @@ -1130,7 +1132,7 @@ int PIOc_read_nc_decomp(int iosysid, const char *filename, int *ioidp, MPI_Comm /* Write the decomp information in netCDF. This is an internal * function. * - * @param iosysid the IO system ID. + * @param ios pointer to io system info. * @param filename the name the decomp file will have. * @param cmode for PIOc_create(). Will be bitwise or'd with NC_WRITE. * @param ndims number of dims in the data being described. @@ -1151,31 +1153,22 @@ int PIOc_read_nc_decomp(int iosysid, const char *filename, int *ioidp, MPI_Comm * ordering, 0 for C array ordering. * @returns 0 for success, error code otherwise. */ -int pioc_write_nc_decomp_int(int iosysid, const char *filename, int cmode, int ndims, +int pioc_write_nc_decomp_int(iosystem_desc_t *ios, const char *filename, int cmode, int ndims, int *global_dimlen, int num_tasks, int *task_maplen, int *map, const char *title, const char *history, int fortran_order) { - iosystem_desc_t *ios; int max_maplen = 0; int ncid; int ret; - /* Get the IO system info. */ - if (!(ios = pio_get_iosystem_from_id(iosysid))) - return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); - /* Check inputs. */ - if (!filename || !global_dimlen || !task_maplen) - return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); - if (title) - if (strlen(title) > PIO_MAX_NAME) - return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); - if (history) - if (strlen(history) > PIO_MAX_NAME) - return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); + pioassert(ios && filename && global_dimlen && task_maplen && + (!title || strlen(title) <= PIO_MAX_NAME) && + (!history || strlen(history) <= PIO_MAX_NAME), "invalid input", + __FILE__, __LINE__); - LOG((2, "pioc_write_nc_decomp_int iosysid = %d filename = %s ndims = %d num_tasks = %d", - iosysid, filename, ndims, num_tasks)); + LOG((2, "pioc_write_nc_decomp_int filename = %s ndims = %d num_tasks = %d", filename, + ndims, num_tasks)); /* Find the maximum maplen. */ for (int t = 0; t < num_tasks; t++) @@ -1184,7 +1177,7 @@ int pioc_write_nc_decomp_int(int iosysid, const char *filename, int cmode, int n LOG((3, "max_maplen = %d", max_maplen)); /* Create the netCDF decomp file. */ - if ((ret = PIOc_create(iosysid, filename, cmode | NC_WRITE, &ncid))) + if ((ret = PIOc_create(ios->iosysid, filename, cmode | NC_WRITE, &ncid))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); /* Write an attribute with the version of this file. 
*/ @@ -1407,7 +1400,7 @@ int pioc_read_nc_decomp_int(int iosysid, const char *filename, int *ndims, int * int max_maplen_in; if ((ret = PIOc_get_att_int(ncid, NC_GLOBAL, DECOMP_MAX_MAPLEN_ATT_NAME, &max_maplen_in))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); - LOG((3, "max_maplen_in = %d", version_in)); + LOG((3, "max_maplen_in = %d", max_maplen_in)); if (max_maplen) *max_maplen = max_maplen_in; @@ -1737,11 +1730,11 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena ios->io_rank == 0) file->do_io = 1; - LOG((2, "file->do_io = %d ios->async_interface = %d", file->do_io, ios->async_interface)); + LOG((2, "file->do_io = %d ios->async = %d", file->do_io, ios->async)); /* If async is in use, and this is not an IO task, bcast the * parameters. */ - if (ios->async_interface) + if (ios->async) { int msg = PIO_MSG_CREATE_FILE; size_t len = strlen(filename); @@ -1914,7 +1907,7 @@ int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filena file->do_io = 1; /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { int msg = PIO_MSG_OPEN_FILE; size_t len = strlen(filename); @@ -2134,7 +2127,7 @@ int pioc_change_def(int ncid, int is_enddef) ios = file->iosystem; /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async_interface) + if (ios->async) { if (!ios->ioproc) { @@ -2223,147 +2216,6 @@ int iotype_is_valid(int iotype) return ret; } -/** - * Internal function to compare rearranger flow control options. - * - * @param opt pointer to rearranger flow control options to compare. - * @param exp_opt pointer to rearranger flow control options with - * expected values. - * @return true if values in opt == values in exp_opt, false - * otherwise. - */ -bool cmp_rearr_comm_fc_opts(const rearr_comm_fc_opt_t *opt, - const rearr_comm_fc_opt_t *exp_opt) -{ - bool is_same = true; - - assert(opt && exp_opt); - - if (opt->enable_hs != exp_opt->enable_hs) - { - LOG((1, "Warning rearranger enable_hs = %s, expected = %s", - opt->enable_hs ? "TRUE" : "FALSE", exp_opt->enable_hs ? "TRUE" : "FALSE")); - is_same = false; - } - - if (opt->enable_isend != exp_opt->enable_isend) - { - LOG((1, "Warning rearranger enable_isend = %s, expected = %s", - opt->enable_isend ? "TRUE" : "FALSE", exp_opt->enable_isend ? "TRUE" : "FALSE")); - is_same = false; - } - - if (opt->max_pend_req != exp_opt->max_pend_req) - { - LOG((1, "Warning rearranger max_pend_req = %d, expected = %d", - opt->max_pend_req, exp_opt->max_pend_req)); - is_same = false; - } - - return is_same; -} - -/** - * Internal function to compare rearranger options. - * - * @param rearr_opts pointer to rearranger options to compare - * @param exp_rearr_opts pointer to rearranger options with the - * expected value - * @return true if values in rearr_opts == values in exp_rearr_opts - * false otherwise - */ -bool cmp_rearr_opts(const rearr_opt_t *rearr_opts, const rearr_opt_t *exp_rearr_opts) -{ - bool is_same = true; - - assert(rearr_opts && exp_rearr_opts); - - if (rearr_opts->comm_type != exp_rearr_opts->comm_type) - { - LOG((1, "Warning rearranger comm_type = %d, expected = %d. ", rearr_opts->comm_type, - exp_rearr_opts->comm_type)); - is_same = false; - } - - if (rearr_opts->fcd != exp_rearr_opts->fcd) - { - LOG((1, "Warning rearranger fcd = %d, expected = %d. 
", rearr_opts->fcd, - exp_rearr_opts->fcd)); - is_same = false; - } - - is_same = is_same && cmp_rearr_comm_fc_opts(&(rearr_opts->comm_fc_opts_comp2io), - &(exp_rearr_opts->comm_fc_opts_comp2io)); - is_same = is_same && cmp_rearr_comm_fc_opts(&(rearr_opts->comm_fc_opts_io2comp), - &(exp_rearr_opts->comm_fc_opts_io2comp)); - - return is_same; -} - -/** - * Internal function to reset rearranger opts in iosystem to valid values. - * The old default for max pending requests was DEF_P2P_MAXREQ = 64. - * - * @param ios pointer to iosystem descriptor - */ -void check_and_reset_rearr_opts(iosystem_desc_t *ios) -{ - /* Disable handshake/isend and set max_pend_req to unlimited */ - const rearr_comm_fc_opt_t def_comm_nofc_opts = - { false, false, PIO_REARR_COMM_UNLIMITED_PEND_REQ }; - /* Disable handshake /isend and set max_pend_req = 0 to turn off throttling */ - const rearr_comm_fc_opt_t def_coll_comm_fc_opts = { false, false, 0 }; - const rearr_opt_t def_coll_rearr_opts = { - PIO_REARR_COMM_COLL, - PIO_REARR_COMM_FC_2D_DISABLE, - def_coll_comm_fc_opts, - def_coll_comm_fc_opts - }; - - assert(ios); - - /* Reset to defaults, if needed (user did not set it correctly) */ - if (ios->rearr_opts.comm_type == PIO_REARR_COMM_COLL) - { - /* Compare and log the user and default rearr opts for coll. */ - cmp_rearr_opts(&(ios->rearr_opts), &def_coll_rearr_opts); - /* Hard reset flow control options. */ - ios->rearr_opts = def_coll_rearr_opts; - } - else if (ios->rearr_opts.comm_type == PIO_REARR_COMM_P2P) - { - if (ios->rearr_opts.fcd == PIO_REARR_COMM_FC_2D_DISABLE) - { - /* Compare and log user and default opts. */ - cmp_rearr_comm_fc_opts(&(ios->rearr_opts.comm_fc_opts_comp2io), - &def_comm_nofc_opts); - cmp_rearr_comm_fc_opts(&(ios->rearr_opts.comm_fc_opts_io2comp), - &def_comm_nofc_opts); - /* Hard reset flow control opts to defaults. */ - ios->rearr_opts.comm_fc_opts_comp2io = def_comm_nofc_opts; - ios->rearr_opts.comm_fc_opts_io2comp = def_comm_nofc_opts; - } - else if (ios->rearr_opts.fcd == PIO_REARR_COMM_FC_1D_COMP2IO) - { - /* Compare and log user and default opts. */ - cmp_rearr_comm_fc_opts(&(ios->rearr_opts.comm_fc_opts_io2comp), - &def_comm_nofc_opts); - /* Hard reset io2comp dir to defaults. */ - ios->rearr_opts.comm_fc_opts_io2comp = def_comm_nofc_opts; - } - else if (ios->rearr_opts.fcd == PIO_REARR_COMM_FC_1D_IO2COMP) - { - /* Compare and log user and default opts. */ - cmp_rearr_comm_fc_opts(&(ios->rearr_opts.comm_fc_opts_comp2io), - &def_comm_nofc_opts); - /* Hard reset comp2io dir to defaults. */ - ios->rearr_opts.comm_fc_opts_comp2io = def_comm_nofc_opts; - } - /* Don't reset if flow control is enabled in both directions - * by user. */ - } -} - /** * Set the rearranger options associated with an iosystem * @@ -2404,21 +2256,25 @@ int PIOc_set_rearr_opts(int iosysid, int comm_type, int fcd, bool enable_hs_c2i, int max_pend_req_i2c) { iosystem_desc_t *ios; - int ret = PIO_NOERR; rearr_opt_t user_rearr_opts = { comm_type, fcd, {enable_hs_c2i,enable_isend_c2i, max_pend_req_c2i}, {enable_hs_i2c, enable_isend_i2c, max_pend_req_i2c} }; + /* Check inputs. */ + if ((comm_type != PIO_REARR_COMM_P2P && comm_type != PIO_REARR_COMM_FC_1D_COMP2IO) || + (fcd < 0 || fcd > PIO_REARR_COMM_FC_2D_DISABLE) || + (max_pend_req_c2i != PIO_REARR_COMM_UNLIMITED_PEND_REQ && max_pend_req_c2i < 0) || + (max_pend_req_i2c != PIO_REARR_COMM_UNLIMITED_PEND_REQ && max_pend_req_i2c < 0)) + return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); + /* Get the IO system info. 
*/ if (!(ios = pio_get_iosystem_from_id(iosysid))) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); + /* Set the options. */ ios->rearr_opts = user_rearr_opts; - /* Perform sanity checks on the user supplied values */ - check_and_reset_rearr_opts(ios); - - return ret; + return PIO_NOERR; } diff --git a/src/externals/pio2/tests/cunit/CMakeLists.txt b/src/externals/pio2/tests/cunit/CMakeLists.txt index 1ef8be7afc3b..418340b79165 100644 --- a/src/externals/pio2/tests/cunit/CMakeLists.txt +++ b/src/externals/pio2/tests/cunit/CMakeLists.txt @@ -65,33 +65,45 @@ if (NOT PIO_USE_MPISERIAL) target_link_libraries (test_pioc_fill pioc) add_executable (test_darray EXCLUDE_FROM_ALL test_darray.c test_common.c) target_link_libraries (test_darray pioc) + add_executable (test_darray_multi EXCLUDE_FROM_ALL test_darray_multi.c test_common.c) + target_link_libraries (test_darray_multi pioc) add_executable (test_darray_multivar EXCLUDE_FROM_ALL test_darray_multivar.c test_common.c) target_link_libraries (test_darray_multivar pioc) + add_executable (test_darray_multivar2 EXCLUDE_FROM_ALL test_darray_multivar2.c test_common.c) + target_link_libraries (test_darray_multivar2 pioc) add_executable (test_darray_1d EXCLUDE_FROM_ALL test_darray_1d.c test_common.c) target_link_libraries (test_darray_1d pioc) add_executable (test_darray_3d EXCLUDE_FROM_ALL test_darray_3d.c test_common.c) target_link_libraries (test_darray_3d pioc) + add_executable (test_decomp_uneven EXCLUDE_FROM_ALL test_decomp_uneven.c test_common.c) + target_link_libraries (test_decomp_uneven pioc) add_executable (test_decomps EXCLUDE_FROM_ALL test_decomps.c test_common.c) target_link_libraries (test_decomps pioc) + add_executable (test_rearr EXCLUDE_FROM_ALL test_rearr.c test_common.c) + target_link_libraries (test_rearr pioc) endif () add_executable (test_spmd EXCLUDE_FROM_ALL test_spmd.c test_common.c) target_link_libraries (test_spmd pioc) add_dependencies (tests test_spmd) +add_dependencies (tests test_rearr) add_dependencies (tests test_pioc) add_dependencies (tests test_pioc_unlim) add_dependencies (tests test_pioc_putget) add_dependencies (tests test_pioc_fill) add_dependencies (tests test_darray) +add_dependencies (tests test_darray_multi) add_dependencies (tests test_darray_multivar) +add_dependencies (tests test_darray_multivar2) add_dependencies (tests test_darray_1d) add_dependencies (tests test_darray_3d) +add_dependencies (tests test_decomp_uneven) add_dependencies (tests test_decomps) # Test Timeout in seconds. 
if (PIO_VALGRIND_CHECK) - set (DEFAULT_TEST_TIMEOUT 120) + set (DEFAULT_TEST_TIMEOUT 240) else () - set (DEFAULT_TEST_TIMEOUT 60) + set (DEFAULT_TEST_TIMEOUT 120) endif () # All tests need a certain number of tasks, but they should be able to @@ -109,6 +121,10 @@ else () EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_spmd NUMPROCS ${AT_LEAST_FOUR_TASKS} TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_rearr + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_rearr + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) add_mpi_test(test_intercomm2 EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_intercomm2 NUMPROCS ${AT_LEAST_FOUR_TASKS} @@ -169,10 +185,18 @@ else () EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray NUMPROCS ${AT_LEAST_FOUR_TASKS} TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_darray_multi + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_multi + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) add_mpi_test(test_darray_multivar EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_multivar NUMPROCS ${AT_LEAST_FOUR_TASKS} TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_darray_multivar2 + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_multivar2 + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) add_mpi_test(test_darray_1d EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_1d NUMPROCS ${AT_LEAST_FOUR_TASKS} @@ -181,6 +205,10 @@ else () EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_3d NUMPROCS ${AT_LEAST_FOUR_TASKS} TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_decomp_uneven + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_decomp_uneven + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) add_mpi_test(test_decomps EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_decomps NUMPROCS ${AT_LEAST_FOUR_TASKS} diff --git a/src/externals/pio2/tests/cunit/pio_tests.h b/src/externals/pio2/tests/cunit/pio_tests.h index 86ac35af5472..5a601ea6df17 100644 --- a/src/externals/pio2/tests/cunit/pio_tests.h +++ b/src/externals/pio2/tests/cunit/pio_tests.h @@ -29,6 +29,9 @@ /* Number of NetCDF-4 types. */ #define NUM_NETCDF4_TYPES 12 +/* Number of PIO rearrangers. */ +#define NUM_REARRANGERS 2 + /* Number of sample files constructed for these tests. */ #define NUM_SAMPLES 3 diff --git a/src/externals/pio2/tests/cunit/test_async_2comp.c b/src/externals/pio2/tests/cunit/test_async_2comp.c deleted file mode 100644 index 3c79cb6101e6..000000000000 --- a/src/externals/pio2/tests/cunit/test_async_2comp.c +++ /dev/null @@ -1,132 +0,0 @@ -/** - * @file Tests for PIOc_Intercomm. This tests basic asynch I/O capability. - * @author Ed Hartnett - * - * To run with valgrind, use this command: - *
mpiexec -n 4 valgrind -v --leak-check=full --suppressions=../../../tests/unit/valsupp_test.supp
- * --error-exitcode=99 --track-origins=yes ./test_intercomm3
- * - */ -#include -#include - -/* Number of processors that will do IO. */ -#define NUM_IO_PROCS 2 - -/* Number of computational components to create. */ -#define COMPONENT_COUNT 2 - -/* The number of tasks this test should run on. */ -#define TARGET_NTASKS 4 - -/* The name of this test. */ -#define TEST_NAME "test_intercomm3" - -/** Run Tests for Init_Intercomm - * - */ -int -main(int argc, char **argv) -{ - int verbose = 1; - - /* Zero-based rank of processor. */ - int my_rank; - - /* Number of processors involved in current execution. */ - int ntasks; - - /* Different output flavors. */ - int flavor[NUM_FLAVORS]; - - int num_flavors; - - /* The ID for the parallel I/O system. */ - int iosysid[COMPONENT_COUNT]; - - /* The ncid of the netCDF file. */ - int ncid; - - /* The ID of the netCDF varable. */ - int varid; - - /* Return code. */ - int ret; - - /* Index for loops. */ - int fmt, d, d1, i; - - /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS))) - ERR(ERR_INIT); - - /* Figure out iotypes. */ - if ((ret = get_iotypes(&num_flavors, flavor))) - ERR(ret); - - /* How many processors will be used for our IO and 2 computation components. */ - int num_procs[COMPONENT_COUNT + 1] = {2, 1, 1}; - - /* Is the current process a computation task? */ - int comp_task = my_rank < 2 ? 0 : 1; - - /* Index of computation task in iosysid array. Varies by rank and - * does not apply to IO component processes. */ - int my_comp_idx = comp_task ? my_rank - 2 : -1; - - /* Initialize the IO system. */ - if ((ret = PIOc_Init_Async(MPI_COMM_WORLD, NUM_IO_PROCS, NULL, COMPONENT_COUNT, - num_procs, NULL, iosysid))) - ERR(ERR_AWFUL); - - /* All the netCDF calls are only executed on the computation - * tasks. The IO tasks have not returned from PIOc_Init_Intercomm, - * and when the do, they should go straight to finalize. */ - if (comp_task) - { - for (int flv = 0; flv < num_flavors; flv++) - { - char filename[NC_MAX_NAME + 1]; - - /* Create a filename. */ - int sample = 1; - sprintf(filename, "%s_%s_%d_%d.nc", TEST_NAME, flavor_name(flv), sample, my_comp_idx); - - /* Create sample file 1. */ - printf("%d %s creating file %s\n", my_rank, TEST_NAME, filename); - if ((ret = create_nc_sample_1(iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL))) - ERR(ret); - - /* Check the file for correctness. */ - if ((ret = check_nc_sample_1(iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL))) - ERR(ret); - - } /* next netcdf format flavor */ - - /* If I don't sleep here for a second, there are problems. */ - sleep(2); - - /* Finalize the IO system. Only call this from the computation tasks. */ - if (verbose) - printf("%d test_intercomm3 Freeing PIO resources\n", my_rank); - for (int c = 0; c < COMPONENT_COUNT; c++) - { - if ((ret = PIOc_finalize(iosysid[c]))) - ERR(ret); - printf("%d test_intercomm3 PIOc_finalize completed for iosysid = %d\n", my_rank, iosysid[c]); - } - } /* endif comp_task */ - - /* Wait for everyone to catch up. */ - printf("%d %s waiting for all processes!\n", my_rank, TEST_NAME); - MPI_Barrier(MPI_COMM_WORLD); - - /* Finalize the MPI library. 
*/ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); - if ((ret = pio_test_finalize(&test_comm))) - return ERR_AWFUL; - - printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); - - return 0; -} diff --git a/src/externals/pio2/tests/cunit/test_async_3proc.c b/src/externals/pio2/tests/cunit/test_async_3proc.c index 020ed78172c0..4a2f8435fe2f 100644 --- a/src/externals/pio2/tests/cunit/test_async_3proc.c +++ b/src/externals/pio2/tests/cunit/test_async_3proc.c @@ -39,7 +39,7 @@ int main(int argc, char **argv) MPI_Comm test_comm; /* comm for test */ /* Num procs for IO and computation. */ - int num_procs[NUM_COMBOS][COMPONENT_COUNT + 1] = {{2, 1}, {1, 2}}; + int num_procs[NUM_COMBOS][COMPONENT_COUNT] = {{1}, {2}}; /* Number of processors that will do IO. */ int num_io_procs[NUM_COMBOS] = {2, 1}; @@ -63,8 +63,8 @@ int main(int argc, char **argv) int comp_task = my_rank < num_io_procs[combo] ? 0 : 1; /* Initialize the IO system. */ - if ((ret = PIOc_Init_Async(test_comm, num_io_procs[combo], NULL, COMPONENT_COUNT, - num_procs[combo], NULL, NULL, NULL, iosysid))) + if ((ret = PIOc_init_async(test_comm, num_io_procs[combo], NULL, COMPONENT_COUNT, + num_procs[combo], NULL, NULL, NULL, PIO_REARR_BOX, iosysid))) ERR(ERR_INIT); for (int c = 0; c < COMPONENT_COUNT; c++) diff --git a/src/externals/pio2/tests/cunit/test_async_4proc.c b/src/externals/pio2/tests/cunit/test_async_4proc.c index 4f91b0c0eecc..db0197ab9be9 100644 --- a/src/externals/pio2/tests/cunit/test_async_4proc.c +++ b/src/externals/pio2/tests/cunit/test_async_4proc.c @@ -32,15 +32,14 @@ int main(int argc, char **argv) int ret; /* Return code. */ MPI_Comm test_comm; - /* Num procs for IO and computation. */ - int num_procs[NUM_COMBOS][COMPONENT_COUNT + 1] = {{3, 1}, {2, 2}, {1, 3}}; + /* Num procs for computation. */ + int num_procs2[NUM_COMBOS][COMPONENT_COUNT] = {{1}, {2}, {3}}; /* Number of processors that will do IO. */ int num_io_procs[NUM_COMBOS] = {3, 2, 1}; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, &test_comm))) ERR(ERR_INIT); /* Test code runs on TARGET_NTASKS tasks. The left over tasks do @@ -57,8 +56,8 @@ int main(int argc, char **argv) int comp_task = my_rank < num_io_procs[combo] ? 0 : 1; /* Initialize the IO system. */ - if ((ret = PIOc_Init_Async(test_comm, num_io_procs[combo], NULL, COMPONENT_COUNT, - num_procs[combo], NULL, NULL, NULL, iosysid))) + if ((ret = PIOc_init_async(test_comm, num_io_procs[combo], NULL, COMPONENT_COUNT, + num_procs2[combo], NULL, NULL, NULL, PIO_REARR_BOX, iosysid))) ERR(ERR_INIT); for (int c = 0; c < COMPONENT_COUNT; c++) diff --git a/src/externals/pio2/tests/cunit/test_async_simple.c b/src/externals/pio2/tests/cunit/test_async_simple.c index a0852b3e712d..4b4836bcb022 100644 --- a/src/externals/pio2/tests/cunit/test_async_simple.c +++ b/src/externals/pio2/tests/cunit/test_async_simple.c @@ -29,13 +29,18 @@ /* Run simple async test. */ int main(int argc, char **argv) { +#define NUM_IO_PROCS 1 +#define NUM_COMP_PROCS 1 int my_rank; /* Zero-based rank of processor. */ int ntasks; /* Number of processors involved in current execution. */ int iosysid[COMPONENT_COUNT]; /* The ID for the parallel I/O system. */ int num_flavors; /* Number of PIO netCDF flavors in this build. */ int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ int ret; /* Return code. */ - int num_procs[COMPONENT_COUNT + 1] = {1, 1}; /* Num procs for IO and computation. 
*/ + int num_procs[COMPONENT_COUNT] = {1}; /* Num procs for IO and computation. */ + int io_proc_list[NUM_IO_PROCS] = {0}; + int comp_proc_list[NUM_COMP_PROCS] = {1}; + int *proc_list[COMPONENT_COUNT] = {comp_proc_list}; MPI_Comm test_comm; /* Initialize test. */ @@ -54,19 +59,22 @@ int main(int argc, char **argv) int comp_task = my_rank < NUM_IO_PROCS ? 0 : 1; /* Check for invalid values. */ - if (PIOc_Init_Async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, - num_procs, NULL, NULL, NULL, NULL) != PIO_EINVAL) + if (PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, + num_procs, NULL, NULL, NULL, PIO_REARR_BOX, NULL) != PIO_EINVAL) + ERR(ERR_WRONG); + if (PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, + num_procs, NULL, NULL, NULL, TEST_VAL_42, iosysid) != PIO_EINVAL) ERR(ERR_WRONG); - if (PIOc_Init_Async(test_comm, NUM_IO_PROCS, NULL, -1, - num_procs, NULL, NULL, NULL, iosysid) != PIO_EINVAL) + if (PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, -1, + num_procs, NULL, NULL, NULL, PIO_REARR_BOX, iosysid) != PIO_EINVAL) ERR(ERR_WRONG); - if (PIOc_Init_Async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, - NULL, NULL, NULL, NULL, iosysid) != PIO_EINVAL) + if (PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, + NULL, NULL, NULL, NULL, PIO_REARR_BOX, iosysid) != PIO_EINVAL) ERR(ERR_WRONG); /* Initialize the IO system. */ - if ((ret = PIOc_Init_Async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, - num_procs, NULL, NULL, NULL, iosysid))) + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, io_proc_list, COMPONENT_COUNT, + num_procs, (int **)proc_list, NULL, NULL, PIO_REARR_BOX, iosysid))) ERR(ERR_INIT); /* All the netCDF calls are only executed on the computation diff --git a/src/externals/pio2/tests/cunit/test_darray_1d.c b/src/externals/pio2/tests/cunit/test_darray_1d.c index 01b894a87497..c5c6ac850125 100644 --- a/src/externals/pio2/tests/cunit/test_darray_1d.c +++ b/src/externals/pio2/tests/cunit/test_darray_1d.c @@ -107,18 +107,52 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * void *test_data_in; void *expected_in; PIO_Offset type_size; /* Size of the data type. */ - float my_float_rank = my_rank; /* my_rank in a float. */ - double my_double_rank = my_rank; /* my_rank in a double. */ + /* My rank as each type. */ + signed char my_byte_rank = my_rank; + char my_char_rank = my_rank; + short my_short_rank = my_rank; + float my_float_rank = my_rank; + double my_double_rank = my_rank; +#ifdef _NETCDF4 + unsigned char my_ubyte_rank = my_rank; + unsigned short my_ushort_rank = my_rank; + unsigned int my_uint_rank = my_rank; + long long my_int64_rank = my_rank; + unsigned long long my_uint64_rank = my_rank; +#endif /* _NETCDF4 */ + + /* Default fill value for each type. */ + signed char byte_fill = NC_FILL_BYTE; + char char_fill = NC_FILL_CHAR; + short short_fill = NC_FILL_SHORT; int int_fill = NC_FILL_INT; float float_fill = NC_FILL_FLOAT; double double_fill = NC_FILL_DOUBLE; +#ifdef _NETCDF4 + unsigned char ubyte_fill = NC_FILL_UBYTE; + unsigned short ushort_fill = NC_FILL_USHORT; + unsigned int uint_fill = NC_FILL_UINT; + long long int64_fill = NC_FILL_INT64; + unsigned long long uint64_fill = NC_FILL_UINT64; +#endif /* _NETCDF4 */ + void *bufr; - int ret; /* Return code. */ + int ret; /* Return code. */ /* Use PIO to create the example file in each of the four * available ways. */ for (int fmt = 0; fmt < num_flavors; fmt++) { + /* BYTE and CHAR don't work with pnetcdf. Don't know why yet. 
*/ + if (flavor[fmt] == PIO_IOTYPE_PNETCDF && (pio_type == PIO_BYTE || pio_type == PIO_CHAR)) + continue; + + /* NetCDF-4 types only work with netCDF-4 formats. */ + printf("pio_type = %d flavor[fmt] = %d\n", pio_type, flavor[fmt]); + if (pio_type > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C && + flavor[fmt] != PIO_IOTYPE_NETCDF4P) + continue; + for (int with_fillvalue = 0; with_fillvalue < NUM_FILLVALUE_PRESENT_TESTS; with_fillvalue++) { /* Create the filename. */ @@ -152,11 +186,37 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * return ret; /* Initialize some data. */ + signed char byte_test_data[2] = {my_rank, my_rank}; + char char_test_data[2] = {my_rank, my_rank}; + short short_test_data[2] = {my_rank, my_rank}; int int_test_data[2] = {my_rank, my_rank}; float float_test_data[2] = {my_rank, my_rank}; double double_test_data[2] = {my_rank, my_rank}; +#ifdef _NETCDF4 + unsigned char ubyte_test_data[2] = {my_rank, my_rank}; + unsigned short ushort_test_data[2] = {my_rank, my_rank}; + unsigned int uint_test_data[2] = {my_rank, my_rank}; + long long int64_test_data[2] = {my_rank, my_rank}; + unsigned long long uint64_test_data[2] = {my_rank, my_rank}; +#endif /* _NETCDF4 */ + switch (pio_type) { + case PIO_BYTE: + test_data = byte_test_data; + fillvalue = with_fillvalue ? &byte_fill : NULL; + expected_in = &my_byte_rank; + break; + case PIO_CHAR: + test_data = char_test_data; + fillvalue = with_fillvalue ? &char_fill : NULL; + expected_in = &my_char_rank; + break; + case PIO_SHORT: + test_data = short_test_data; + fillvalue = with_fillvalue ? &short_fill : NULL; + expected_in = &my_short_rank; + break; case PIO_INT: test_data = int_test_data; fillvalue = with_fillvalue ? &int_fill : NULL; @@ -172,16 +232,42 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * fillvalue = with_fillvalue ? &double_fill : NULL; expected_in = &my_double_rank; break; +#ifdef _NETCDF4 + case PIO_UBYTE: + test_data = ubyte_test_data; + fillvalue = with_fillvalue ? &ubyte_fill : NULL; + expected_in = &my_ubyte_rank; + break; + case PIO_USHORT: + test_data = ushort_test_data; + fillvalue = with_fillvalue ? &ushort_fill : NULL; + expected_in = &my_ushort_rank; + break; + case PIO_UINT: + test_data = uint_test_data; + fillvalue = with_fillvalue ? &uint_fill : NULL; + expected_in = &my_uint_rank; + break; + case PIO_INT64: + test_data = int64_test_data; + fillvalue = with_fillvalue ? &int64_fill : NULL; + expected_in = &my_int64_rank; + break; + case PIO_UINT64: + test_data = uint64_test_data; + fillvalue = with_fillvalue ? &uint64_fill : NULL; + expected_in = &my_uint64_rank; + break; +#endif /* _NETCDF4 */ default: return ERR_WRONG; } - /* Write the data. Our test_data contains only one real value - * (instead of 2, as indicated by arraylen), but due to the - * decomposition, only the first value is used in the - * output. */ - if ((ret = PIOc_write_darray(ncid, varid, ioid, arraylen, test_data, - fillvalue))) + /* Write the data. Our test_data contains only one real + * value (instead of 2, as indicated by arraylen), but due + * to the decomposition, only the first value is used in + * the output. */ + if ((ret = PIOc_write_darray(ncid, varid, ioid, arraylen, test_data, fillvalue))) ERR(ret); /* Close the netCDF file. */ @@ -221,6 +307,18 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * { switch (pio_type) { + case PIO_BYTE: + if (((signed char *)bufr)[e] != (e < 4 ? 
e : NC_FILL_BYTE)) + return ERR_WRONG; + break; + case PIO_CHAR: + if (((char *)bufr)[e] != (e < 4 ? e : NC_FILL_CHAR)) + return ERR_WRONG; + break; + case PIO_SHORT: + if (((short *)bufr)[e] != (e < 4 ? e : NC_FILL_SHORT)) + return ERR_WRONG; + break; case PIO_INT: if (((int *)bufr)[e] != (e < 4 ? e : NC_FILL_INT)) return ERR_WRONG; @@ -233,6 +331,28 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * if (((double *)bufr)[e] != (e < 4 ? e : NC_FILL_DOUBLE)) return ERR_WRONG; break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (((unsigned char *)bufr)[e] != (e < 4 ? e : NC_FILL_UBYTE)) + return ERR_WRONG; + break; + case PIO_USHORT: + if (((unsigned short *)bufr)[e] != (e < 4 ? e : NC_FILL_USHORT)) + return ERR_WRONG; + break; + case PIO_UINT: + if (((unsigned int *)bufr)[e] != (e < 4 ? e : NC_FILL_UINT)) + return ERR_WRONG; + break; + case PIO_INT64: + if (((long long *)bufr)[e] != (e < 4 ? e : NC_FILL_INT64)) + return ERR_WRONG; + break; + case PIO_UINT64: + if (((unsigned long long *)bufr)[e] != (e < 4 ? e : NC_FILL_UINT64)) + return ERR_WRONG; + break; +#endif /* _NETCDF4 */ default: return ERR_WRONG; } @@ -277,11 +397,35 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, void *test_data_in; void *expected_in; PIO_Offset type_size; /* Size of the data type. */ - float my_float_rank = my_rank; /* my_rank in a float. */ - double my_double_rank = my_rank; /* my_rank in a double. */ + + /* My rank as each type. */ + signed char my_byte_rank = my_rank; + char my_char_rank = my_rank; + short my_short_rank = my_rank; + float my_float_rank = my_rank; + double my_double_rank = my_rank; +#ifdef _NETCDF4 + unsigned char my_ubyte_rank = my_rank; + unsigned short my_ushort_rank = my_rank; + unsigned int my_uint_rank = my_rank; + long long my_int64_rank = my_rank; + unsigned long long my_uint64_rank = my_rank; +#endif /* _NETCDF4 */ + + /* Default fill value for each type. */ + signed char byte_fill = NC_FILL_BYTE; + char char_fill = NC_FILL_CHAR; + short short_fill = NC_FILL_SHORT; int int_fill = NC_FILL_INT; float float_fill = NC_FILL_FLOAT; double double_fill = NC_FILL_DOUBLE; +#ifdef _NETCDF4 + unsigned char ubyte_fill = NC_FILL_UBYTE; + unsigned short ushort_fill = NC_FILL_USHORT; + unsigned int uint_fill = NC_FILL_UINT; + long long int64_fill = NC_FILL_INT64; + unsigned long long uint64_fill = NC_FILL_UINT64; +#endif /* _NETCDF4 */ void *bufr; int ret; /* Return code. */ @@ -289,6 +433,16 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, * available ways. */ for (int fmt = 0; fmt < num_flavors; fmt++) { + /* BYTE and CHAR don't work with pnetcdf. Don't know why yet. */ + if (flavor[fmt] == PIO_IOTYPE_PNETCDF && (pio_type == PIO_BYTE || pio_type == PIO_CHAR)) + continue; + + /* NetCDF-4 types only work with netCDF-4 formats. */ + printf("pio_type = %d flavor[fmt] = %d\n", pio_type, flavor[fmt]); + if (pio_type > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C && + flavor[fmt] != PIO_IOTYPE_NETCDF4P) + continue; + /* Create the filename. */ sprintf(filename, "data_%s_iotype_%d_pio_type_%d_unlim.nc", TEST_NAME, flavor[fmt], pio_type); @@ -322,11 +476,36 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, return ret; /* Initialize some data. 
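Both fill tests above guard each type/iotype pairing the same way: BYTE and CHAR are skipped under pnetcdf, and the netCDF-4-only types are skipped unless a netCDF-4 iotype is in use. Factored into a helper, the check looks roughly like this; a sketch, not existing PIO code.

#include <pio.h>

/* Sketch: return nonzero if this (pio_type, iotype) pair should be exercised,
 * mirroring the two "continue" guards in the darray fill tests. */
static int type_iotype_supported(int pio_type, int iotype)
{
    /* BYTE and CHAR are skipped for pnetcdf in these tests. */
    if (iotype == PIO_IOTYPE_PNETCDF && (pio_type == PIO_BYTE || pio_type == PIO_CHAR))
        return 0;

    /* Types beyond PIO_DOUBLE (UBYTE, USHORT, UINT, INT64, UINT64) need netCDF-4. */
    if (pio_type > PIO_DOUBLE &&
        iotype != PIO_IOTYPE_NETCDF4C && iotype != PIO_IOTYPE_NETCDF4P)
        return 0;

    return 1;
}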
*/ + signed char byte_test_data[2] = {my_rank, my_rank}; + char char_test_data[2] = {my_rank, my_rank}; + short short_test_data[2] = {my_rank, my_rank}; int int_test_data[2] = {my_rank, my_rank}; float float_test_data[2] = {my_rank, my_rank}; double double_test_data[2] = {my_rank, my_rank}; +#ifdef _NETCDF4 + unsigned char ubyte_test_data[2] = {my_rank, my_rank}; + unsigned short ushort_test_data[2] = {my_rank, my_rank}; + unsigned int uint_test_data[2] = {my_rank, my_rank}; + long long int64_test_data[2] = {my_rank, my_rank}; + unsigned long long uint64_test_data[2] = {my_rank, my_rank}; +#endif /* _NETCDF4 */ switch (pio_type) { + case PIO_BYTE: + test_data = byte_test_data; + fillvalue = &byte_fill; + expected_in = &my_byte_rank; + break; + case PIO_CHAR: + test_data = char_test_data; + fillvalue = &char_fill; + expected_in = &my_char_rank; + break; + case PIO_SHORT: + test_data = short_test_data; + fillvalue = &short_fill; + expected_in = &my_short_rank; + break; case PIO_INT: test_data = int_test_data; fillvalue = &int_fill; @@ -342,6 +521,33 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, fillvalue = &double_fill; expected_in = &my_double_rank; break; +#ifdef _NETCDF4 + case PIO_UBYTE: + test_data = ubyte_test_data; + fillvalue = &ubyte_fill; + expected_in = &my_ubyte_rank; + break; + case PIO_USHORT: + test_data = ushort_test_data; + fillvalue = &ushort_fill; + expected_in = &my_ushort_rank; + break; + case PIO_UINT: + test_data = uint_test_data; + fillvalue = &uint_fill; + expected_in = &my_uint_rank; + break; + case PIO_INT64: + test_data = int64_test_data; + fillvalue = &int64_fill; + expected_in = &my_int64_rank; + break; + case PIO_UINT64: + test_data = uint64_test_data; + fillvalue = &uint64_fill; + expected_in = &my_uint64_rank; + break; +#endif /* _NETCDF4 */ default: return ERR_WRONG; } @@ -406,6 +612,18 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, { switch (pio_type) { + case PIO_BYTE: + if (((signed char *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_BYTE)) + return ERR_WRONG; + break; + case PIO_CHAR: + if (((char *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_CHAR)) + return ERR_WRONG; + break; + case PIO_SHORT: + if (((short *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_SHORT)) + return ERR_WRONG; + break; case PIO_INT: if (((int *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_INT)) return ERR_WRONG; @@ -418,6 +636,28 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, if (((double *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_DOUBLE)) return ERR_WRONG; break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (((unsigned char *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_UBYTE)) + return ERR_WRONG; + break; + case PIO_USHORT: + if (((unsigned short *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_USHORT)) + return ERR_WRONG; + break; + case PIO_UINT: + if (((unsigned int *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_UINT)) + return ERR_WRONG; + break; + case PIO_INT64: + if (((long long *)bufr)[e] != (e % 8 < 4 ? e % 8 : NC_FILL_INT64)) + return ERR_WRONG; + break; + case PIO_UINT64: + if (((unsigned long long *)bufr)[e] != (e % 8 < 4 ? 
e % 8 : NC_FILL_UINT64)) + return ERR_WRONG; + break; +#endif /* _NETCDF4 */ default: return ERR_WRONG; } @@ -466,8 +706,7 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, sprintf(filename, "decomp_%s_iotype_%d.nc", TEST_NAME, flavor[fmt]); printf("writing decomp file %s\n", filename); - if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, test_comm, NULL, - NULL, 0))) + if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, NULL, NULL, 0))) return ret; /* Read the data. */ @@ -480,10 +719,19 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, { iosystem_desc_t *ios; io_desc_t *iodesc; - int expected_basetype; + MPI_Datatype expected_basetype; switch (pio_type) { + case PIO_BYTE: + expected_basetype = MPI_BYTE; + break; + case PIO_CHAR: + expected_basetype = MPI_CHAR; + break; + case PIO_SHORT: + expected_basetype = MPI_SHORT; + break; case PIO_INT: expected_basetype = MPI_INT; break; @@ -493,6 +741,23 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, case PIO_DOUBLE: expected_basetype = MPI_DOUBLE; break; +#ifdef _NETCDF4 + case PIO_UBYTE: + expected_basetype = MPI_UNSIGNED_CHAR; + break; + case PIO_USHORT: + expected_basetype = MPI_UNSIGNED_SHORT; + break; + case PIO_UINT: + expected_basetype = MPI_UNSIGNED; + break; + case PIO_INT64: + expected_basetype = MPI_LONG_LONG; + break; + case PIO_UINT64: + expected_basetype = MPI_UNSIGNED_LONG_LONG; + break; +#endif /* _NETCDF4 */ default: return ERR_WRONG; } @@ -536,8 +801,14 @@ int main(int argc, char **argv) { #define NUM_REARRANGERS_TO_TEST 2 int rearranger[NUM_REARRANGERS_TO_TEST] = {PIO_REARR_BOX, PIO_REARR_SUBSET}; -#define NUM_TYPES_TO_TEST 3 - int test_type[NUM_TYPES_TO_TEST] = {PIO_INT, PIO_FLOAT, PIO_DOUBLE}; +#ifdef _NETCDF4 +#define NUM_TYPES_TO_TEST 11 + int test_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, + PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64}; +#else +#define NUM_TYPES_TO_TEST 6 + int test_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE}; +#endif /* _NETCDF4 */ int my_rank; int ntasks; int num_flavors; /* Number of PIO netCDF flavors in this build. */ diff --git a/src/externals/pio2/tests/cunit/test_darray_3d.c b/src/externals/pio2/tests/cunit/test_darray_3d.c index 07f2cd4e303c..e261c0cec433 100644 --- a/src/externals/pio2/tests/cunit/test_darray_3d.c +++ b/src/externals/pio2/tests/cunit/test_darray_3d.c @@ -92,7 +92,7 @@ int create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *ioid) /* Create the PIO decomposition for this test. */ printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3, dim_len_3d, elements_per_pe, - compdof, ioid, NULL, NULL, NULL))) + compdof, ioid, 0, NULL, NULL))) ERR(ret); printf("%d decomposition initialized.\n", my_rank); @@ -259,8 +259,7 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, sprintf(filename, "decomp_%s_iotype_%d.nc", TEST_NAME, flavor[fmt]); printf("writing decomp file %s\n", filename); - if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, test_comm, NULL, - NULL, 0))) + if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, NULL, NULL, 0))) return ret; /* Read the data. 
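The decomposition read/write check above now compares iodesc->basetype as a real MPI_Datatype rather than an int. The correspondence it expects can be written as a small lookup, sketched here; the unsigned and 64-bit cases assume a netCDF-4 build.

#include <mpi.h>
#include <pio.h>

/* Sketch: expected MPI basetype for each PIO/netCDF type, as checked in
 * test_decomp_read_write().  Returns MPI_DATATYPE_NULL for unknown types. */
static MPI_Datatype expected_mpi_type(int pio_type)
{
    switch (pio_type)
    {
    case PIO_BYTE:   return MPI_BYTE;
    case PIO_CHAR:   return MPI_CHAR;
    case PIO_SHORT:  return MPI_SHORT;
    case PIO_INT:    return MPI_INT;
    case PIO_FLOAT:  return MPI_FLOAT;
    case PIO_DOUBLE: return MPI_DOUBLE;
#ifdef _NETCDF4
    case PIO_UBYTE:  return MPI_UNSIGNED_CHAR;
    case PIO_USHORT: return MPI_UNSIGNED_SHORT;
    case PIO_UINT:   return MPI_UNSIGNED;
    case PIO_INT64:  return MPI_LONG_LONG;
    case PIO_UINT64: return MPI_UNSIGNED_LONG_LONG;
#endif
    default:         return MPI_DATATYPE_NULL;
    }
}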
*/ diff --git a/src/externals/pio2/tests/cunit/test_darray_multi.c b/src/externals/pio2/tests/cunit/test_darray_multi.c new file mode 100644 index 000000000000..206b783e121b --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_darray_multi.c @@ -0,0 +1,473 @@ +/* + * Tests for PIO distributed arrays. This program tests the + * PIOc_write_darray_multi() function with more than one variable. + * + * Ed Hartnett, 3/7/17 + */ +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 4 + +/* The name of this test. */ +#define TEST_NAME "test_darray_multi" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 1 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 1 + +/* The number of dimensions in the example data. In this test, we + * are using three-dimensional data. */ +#define NDIM 3 + +/* But sometimes we need arrays of the non-record dimensions. */ +#define NDIM2 2 + +/* The length of our sample data along each dimension. */ +#define X_DIM_LEN 4 +#define Y_DIM_LEN 4 + +/* The number of timesteps of data to write. */ +#define NUM_TIMESTEPS 2 + +/* Number of variables. */ +#define NVAR 3 + +/* For attributes. */ +#define NOTE_NAME "note" +#define NOTE "This is a test file for the PIO library, and may be deleted." + +/* Who would have thought? */ +#define TOTAL_NUMBER_OF_STOOGES_NAME "Total_Number_of_Stooges" +#define TOTAL_NUMBER_OF_STOOGES 6 + +/* The dimension names. */ +char dim_name[NDIM][PIO_MAX_NAME + 1] = {"year", "Stooge_popularity", "face_smacks"}; + +/* The variable names. */ +char var_name[NVAR][PIO_MAX_NAME + 1] = {"Larry", "Curly", "Moe"}; + +/* Length of the dimensions in the sample data. */ +int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; + +/** + * Test the darray functionality. Create a netCDF file with 3 + * dimensions and 3 variable, and use PIOc_write_darray_multi() to + * write one record of data to all three vars at once. + * + * @param iosysid the IO system ID. + * @param ioid the ID of the decomposition. + * @param num_flavors the number of IOTYPES available in this build. + * @param flavor array of available iotypes. + * @param my_rank rank of this task. + * @param pio_type the type of the data. + * @returns 0 for success, error code otherwise. +*/ +int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, + int pio_type) +{ +#define NUM_TEST_CASES_WRT_MULTI 2 +#define NUM_TEST_CASES_FILLVALUE 2 + + char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ + int dimids[NDIM]; /* The dimension IDs. */ + int ncid; /* The ncid of the netCDF file. */ + int ncid2; /* The ncid of the re-opened netCDF file. */ + int varid[NVAR]; /* The IDs of the netCDF varables. */ + int ret; /* Return code. */ + PIO_Offset arraylen = 4; /* Amount of data from each task. */ + void *fillvalue; /* Pointer to fill value. */ + void *test_data; /* Pointer to test data we will write. */ + void *test_data_in; /* Pointer to buffer we will read into. */ + + /* Default fill value array for each type. 
*/ + signed char byte_fill[NVAR] = {NC_FILL_BYTE, NC_FILL_BYTE, NC_FILL_BYTE}; + char char_fill[NVAR] = {NC_FILL_CHAR, NC_FILL_CHAR, NC_FILL_CHAR}; + short short_fill[NVAR] = {NC_FILL_SHORT, NC_FILL_SHORT, NC_FILL_SHORT}; + int int_fill[NVAR] = {NC_FILL_INT, NC_FILL_INT, NC_FILL_INT}; + float float_fill[NVAR] = {NC_FILL_FLOAT, NC_FILL_FLOAT, NC_FILL_FLOAT}; + double double_fill[NVAR] = {NC_FILL_DOUBLE, NC_FILL_DOUBLE, NC_FILL_DOUBLE}; +#ifdef _NETCDF4 + unsigned char ubyte_fill[NVAR] = {NC_FILL_UBYTE, NC_FILL_UBYTE, NC_FILL_UBYTE}; + unsigned short ushort_fill[NVAR] = {NC_FILL_USHORT, NC_FILL_USHORT, NC_FILL_USHORT}; + unsigned int uint_fill[NVAR] = {NC_FILL_UINT, NC_FILL_UINT, NC_FILL_UINT}; + long long int64_fill[NVAR] = {NC_FILL_INT64, NC_FILL_INT64, NC_FILL_INT64}; + unsigned long long uint64_fill[NVAR] = {NC_FILL_UINT64, NC_FILL_UINT64, NC_FILL_UINT64}; +#endif /* _NETCDF4 */ + + /* Test data we will write. */ + signed char test_data_byte[arraylen * NVAR]; + char test_data_char[arraylen * NVAR]; + short test_data_short[arraylen * NVAR]; + int test_data_int[arraylen * NVAR]; + float test_data_float[arraylen * NVAR]; + double test_data_double[arraylen * NVAR]; +#ifdef _NETCDF4 + unsigned char test_data_ubyte[arraylen * NVAR]; + unsigned short test_data_ushort[arraylen * NVAR]; + unsigned int test_data_uint[arraylen * NVAR]; + long long test_data_int64[arraylen * NVAR]; + unsigned long long test_data_uint64[arraylen * NVAR]; +#endif /* _NETCDF4 */ + + /* We will read test data into these buffers. */ + signed char test_data_byte_in[arraylen]; + char test_data_char_in[arraylen]; + short test_data_short_in[arraylen]; + int test_data_int_in[arraylen]; + float test_data_float_in[arraylen]; + double test_data_double_in[arraylen]; +#ifdef _NETCDF4 + unsigned char test_data_ubyte_in[arraylen]; + unsigned short test_data_ushort_in[arraylen]; + unsigned int test_data_uint_in[arraylen]; + long long test_data_int64_in[arraylen]; + unsigned long long test_data_uint64_in[arraylen]; +#endif /* _NETCDF4 */ + + /* Initialize a big blob of test data for NVAR vars. */ + for (int f = 0; f < arraylen * NVAR; f++) + { + test_data_byte[f] = my_rank * 1 + f; + test_data_char[f] = my_rank * 2 + f; + test_data_short[f] = my_rank * 5 + f; + test_data_int[f] = my_rank * 10 + f; + test_data_float[f] = my_rank * 10 + f + 0.5; + test_data_double[f] = my_rank * 100000 + f + 0.5; +#ifdef _NETCDF4 + test_data_ubyte[f] = my_rank * 3 + f; + test_data_ushort[f] = my_rank * 9 + f; + test_data_uint[f] = my_rank * 100 + f; + test_data_int64[f] = my_rank * 10000 + f; + test_data_uint64[f] = my_rank * 100000 + f; +#endif /* _NETCDF4 */ + } + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + /* 1-byte types not working with pnetcdf. */ + if (flavor[fmt] == PIO_IOTYPE_PNETCDF && (pio_type == PIO_BYTE || pio_type == PIO_CHAR)) + continue; + + /* NetCDF-4 types only work with netCDF-4. */ + if (pio_type > PIO_DOUBLE && (flavor[fmt] != PIO_IOTYPE_NETCDF4C && + flavor[fmt] != PIO_IOTYPE_NETCDF4P)) + continue; + + /* Add a couple of extra tests for the + * PIOc_write_darray_multi() function. */ + for (int test_multi = 0; test_multi < NUM_TEST_CASES_WRT_MULTI; test_multi++) + { + /* Test with/without providing a fill value to PIOc_write_darray(). */ + for (int provide_fill = 0; provide_fill < NUM_TEST_CASES_FILLVALUE; provide_fill++) + { + /* Create the filename. 
*/ + sprintf(filename, "data_%s_iotype_%d_pio_type_%d_test_multi_%d_provide_fill_%d.nc", TEST_NAME, + flavor[fmt], pio_type, test_multi, provide_fill); + + /* Select the fill value and data. */ + switch (pio_type) + { + case PIO_BYTE: + fillvalue = provide_fill ? byte_fill : NULL; + test_data = test_data_byte; + test_data_in = test_data_byte_in; + break; + case PIO_CHAR: + fillvalue = provide_fill ? char_fill : NULL; + test_data = test_data_char; + test_data_in = test_data_char_in; + break; + case PIO_SHORT: + fillvalue = provide_fill ? short_fill : NULL; + test_data = test_data_short; + test_data_in = test_data_short_in; + break; + case PIO_INT: + fillvalue = provide_fill ? int_fill : NULL; + test_data = test_data_int; + test_data_in = test_data_int_in; + break; + case PIO_FLOAT: + fillvalue = provide_fill ? float_fill : NULL; + test_data = test_data_float; + test_data_in = test_data_float_in; + break; + case PIO_DOUBLE: + fillvalue = provide_fill ? double_fill : NULL; + test_data = test_data_double; + test_data_in = test_data_double_in; + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + fillvalue = provide_fill ? ubyte_fill : NULL; + test_data = test_data_ubyte; + test_data_in = test_data_ubyte_in; + break; + case PIO_USHORT: + fillvalue = provide_fill ? ushort_fill : NULL; + test_data = test_data_ushort; + test_data_in = test_data_ushort_in; + break; + case PIO_UINT: + fillvalue = provide_fill ? uint_fill : NULL; + test_data = test_data_uint; + test_data_in = test_data_uint_in; + break; + case PIO_INT64: + fillvalue = provide_fill ? int64_fill : NULL; + test_data = test_data_int64; + test_data_in = test_data_int64_in; + break; + case PIO_UINT64: + fillvalue = provide_fill ? uint64_fill : NULL; + test_data = test_data_uint64; + test_data_in = test_data_uint64_in; + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } + + /* Create the netCDF output file. */ + printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, + flavor[fmt], pio_type); + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) + ERR(ret); + + /* Define netCDF dimensions and variable. */ + printf("%d Defining netCDF metadata...\n", my_rank); + for (int d = 0; d < NDIM; d++) + if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) + ERR(ret); + + /* Define a variable. */ + for (int v = 0; v < NVAR; v++) + if ((ret = PIOc_def_var(ncid, var_name[v], pio_type, NDIM, dimids, &varid[v]))) + ERR(ret); + + /* Leave a note. */ + if ((ret = PIOc_put_att_text(ncid, NC_GLOBAL, NOTE_NAME, strlen(NOTE), NOTE))) + ERR(ret); + int num_stooges = TOTAL_NUMBER_OF_STOOGES; + if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, TOTAL_NUMBER_OF_STOOGES_NAME, PIO_INT, 1, &num_stooges))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Set the value of the record dimension. */ + if ((ret = PIOc_setframe(ncid, varid[0], 0))) + ERR(ret); + + int frame[NVAR] = {0, 0, 0}; + int flushtodisk = test_multi; + + /* Write the data with the _multi function. */ + if ((ret = PIOc_write_darray_multi(ncid, varid, ioid, NVAR, arraylen, test_data, frame, + fillvalue, flushtodisk))) + ERR(ret); + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) + ERR(ret); + + /* Now use read_darray on each var in turn and make + * sure we get correct data. */ + for (int v = 0; v < NVAR; v++) + { + /* Read the data. 
*/ + if ((ret = PIOc_read_darray(ncid2, varid[v], ioid, arraylen, test_data_in))) + ERR(ret); + + /* Check the results. */ + for (int f = 0; f < arraylen; f++) + { + switch (pio_type) + { + case PIO_BYTE: + if (test_data_byte_in[f] != test_data_byte[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_CHAR: + if (test_data_char_in[f] != test_data_char[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_SHORT: + if (test_data_short_in[f] != test_data_short[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_INT: + if (test_data_int_in[f] != test_data_int[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_FLOAT: + if (test_data_float_in[f] != test_data_float[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_DOUBLE: + if (test_data_double_in[f] != test_data_double[f + arraylen * v]) + return ERR_WRONG; + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (test_data_ubyte_in[f] != test_data_ubyte[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_USHORT: + if (test_data_ushort_in[f] != test_data_ushort[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_UINT: + if (test_data_uint_in[f] != test_data_uint[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_INT64: + if (test_data_int64_in[f] != test_data_int64[f + arraylen * v]) + return ERR_WRONG; + break; + case PIO_UINT64: + if (test_data_uint64_in[f] != test_data_uint64[f + arraylen * v]) + return ERR_WRONG; + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } + } + } + + /* Close the netCDF file. */ + printf("%d Closing the sample data file...\n", my_rank); + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + } /* next fillvalue test case */ + } /* next test multi */ + } /* next iotype */ + + return PIO_NOERR; +} + +/** + * Run all the tests. + * + * @param iosysid the IO system ID. + * @param num_flavors number of available iotypes in the build. + * @param flavor pointer to array of the available iotypes. + * @param my_rank rank of this task. + * @param test_comm the communicator the test is running on. + * @returns 0 for success, error code otherwise. + */ +int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, + MPI_Comm test_comm) +{ +#ifdef _NETCDF4 +#define NUM_TYPES_TO_TEST 11 + int pio_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, + PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64}; +#else +#define NUM_TYPES_TO_TEST 6 + int pio_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE}; +#endif /* _NETCDF4 */ + int ioid; + char filename[NC_MAX_NAME + 1]; + int dim_len_2d[NDIM2] = {X_DIM_LEN, Y_DIM_LEN}; + int ret; /* Return code. */ + + for (int t = 0; t < NUM_TYPES_TO_TEST; t++) + { + /* This will be our file name for writing out decompositions. */ + sprintf(filename, "%s_decomp_rank_%d_flavor_%d_type_%d.nc", TEST_NAME, my_rank, + *flavor, pio_type[t]); + + /* Decompose the data over the tasks. */ + if ((ret = create_decomposition_2d(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, + &ioid, pio_type[t]))) + return ret; + + /* Run a simple darray test. */ + if ((ret = test_darray(iosysid, ioid, num_flavors, flavor, my_rank, pio_type[t]))) + return ret; + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + } + + return PIO_NOERR; +} + +/* Run tests for darray functions. 
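test_all_darray() above repeats the same lifecycle for every type: build a block decomposition of the 4x4 grid, run the darray test against it, then free it. The sketch below shows that lifecycle directly with PIOc_init_decomp() and PIOc_freedecomp(); it assumes the zero-based compmap convention of PIOc_init_decomp() and passes 0 to take the iosystem's default rearranger, as the updated tests do.

#include <pio.h>

#define NDIM2     2
#define X_DIM_LEN 4
#define Y_DIM_LEN 4

/* Sketch: give each of 'ntasks' tasks a contiguous block of the global 4x4
 * array, then release the decomposition again. */
int decomp_lifecycle(int iosysid, int ntasks, int my_rank, int pio_type)
{
    int gdimlen[NDIM2] = {X_DIM_LEN, Y_DIM_LEN};
    int maplen = (X_DIM_LEN * Y_DIM_LEN) / ntasks;
    PIO_Offset compmap[maplen];
    int ioid;
    int ret;

    /* Global position of each element owned by this task. */
    for (int i = 0; i < maplen; i++)
        compmap[i] = my_rank * maplen + i;

    if ((ret = PIOc_init_decomp(iosysid, pio_type, NDIM2, gdimlen, maplen,
                                compmap, &ioid, 0, NULL, NULL)))
        return ret;

    /* ... PIOc_write_darray()/PIOc_read_darray() against 'ioid' here ... */

    return PIOc_freedecomp(iosysid, ioid);
}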
*/ +int main(int argc, char **argv) +{ +#define NUM_REARRANGERS_TO_TEST 2 + int rearranger[NUM_REARRANGERS_TO_TEST] = {PIO_REARR_BOX, PIO_REARR_SUBSET}; + int my_rank; + int ntasks; + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + MIN_NTASKS, 3, &test_comm))) + ERR(ERR_INIT); + + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Only do something on max_ntasks tasks. */ + if (my_rank < TARGET_NTASKS) + { + int iosysid; /* The ID for the parallel I/O system. */ + int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ + int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ + int ret; /* Return code. */ + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + printf("Runnings tests for %d flavors\n", num_flavors); + + for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) + { + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, + ioproc_start, rearranger[r], &iosysid))) + return ret; + + /* Run tests. */ + printf("%d Running tests...\n", my_rank); + if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm))) + return ret; + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + } /* next rearranger */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + printf("%d %s Finalizing...\n", my_rank, TEST_NAME); + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_darray_multivar.c b/src/externals/pio2/tests/cunit/test_darray_multivar.c index c4b91c32ae24..773de9b5f600 100644 --- a/src/externals/pio2/tests/cunit/test_darray_multivar.c +++ b/src/externals/pio2/tests/cunit/test_darray_multivar.c @@ -71,10 +71,11 @@ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; * (ignored if use_fill is 0). * @returns 0 for success, error code otherwise. */ -int test_3_empty(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, - int pio_type, MPI_Comm test_comm, int rearranger, int use_fill, - int use_default) +int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, + int my_rank, int pio_type, MPI_Comm test_comm, + int rearranger, int use_fill, int use_default) { +#define NUM_MULTIVAR_TEST_CASES 2 char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ int dimids[NDIM]; /* The dimension IDs. */ int ncid; /* The ncid of the netCDF file. 
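The test driver above brings the whole I/O system up and down once per rearranger. A condensed sketch of that loop using PIOc_Init_Intracomm() with the same stride-1, base-0 I/O layout; the body of the loop is left as a placeholder comment.

#include <mpi.h>
#include <pio.h>

/* Sketch: bring up an intracomm I/O system on four tasks, run once with each
 * rearranger, and tear it down again. */
int run_with_both_rearrangers(MPI_Comm comm)
{
    int rearranger[2] = {PIO_REARR_BOX, PIO_REARR_SUBSET};
    int iosysid;
    int ret;

    for (int r = 0; r < 2; r++)
    {
        if ((ret = PIOc_Init_Intracomm(comm, 4 /* iotasks */, 1 /* stride */,
                                       0 /* base */, rearranger[r], &iosysid)))
            return ret;

        /* ... create decompositions, write and read darrays here ... */

        if ((ret = PIOc_finalize(iosysid)))
            return ret;
    }
    return 0;
}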
*/ @@ -84,6 +85,18 @@ int test_3_empty(int iosysid, int ioid, int num_flavors, int *flavor, int my_ran void *fillvalue; void *test_data; void *test_data_in; + signed char fillvalue_byte = NC_FILL_BYTE; + signed char custom_fillvalue_byte = -TEST_VAL_42; + signed char test_data_byte[arraylen]; + signed char test_data_byte_in[arraylen]; + char fillvalue_char = NC_FILL_CHAR; + char custom_fillvalue_char = -TEST_VAL_42; + char test_data_char[arraylen]; + char test_data_char_in[arraylen]; + short fillvalue_short = NC_FILL_SHORT; + short custom_fillvalue_short = -TEST_VAL_42; + short test_data_short[arraylen]; + short test_data_short_in[arraylen]; int fillvalue_int = NC_FILL_INT; int custom_fillvalue_int = -TEST_VAL_42; int test_data_int[arraylen]; @@ -96,19 +109,66 @@ int test_3_empty(int iosysid, int ioid, int num_flavors, int *flavor, int my_ran double custom_fillvalue_double = (-TEST_VAL_42 * 100); double test_data_double[arraylen]; double test_data_double_in[arraylen]; +#ifdef _NETCDF4 + unsigned char fillvalue_ubyte = NC_FILL_UBYTE; + unsigned char custom_fillvalue_ubyte = TEST_VAL_42; + unsigned char test_data_ubyte[arraylen]; + unsigned char test_data_ubyte_in[arraylen]; + unsigned short fillvalue_ushort = NC_FILL_USHORT; + unsigned short custom_fillvalue_ushort = (TEST_VAL_42 * 100); + unsigned short test_data_ushort[arraylen]; + unsigned short test_data_ushort_in[arraylen]; + unsigned int fillvalue_uint = NC_FILL_UINT; + unsigned int custom_fillvalue_uint = (TEST_VAL_42 * 100); + unsigned int test_data_uint[arraylen]; + unsigned int test_data_uint_in[arraylen]; + long long fillvalue_int64 = NC_FILL_INT64; + long long custom_fillvalue_int64 = (TEST_VAL_42 * 100); + long long test_data_int64[arraylen]; + long long test_data_int64_in[arraylen]; + unsigned long long fillvalue_uint64 = NC_FILL_UINT64; + unsigned long long custom_fillvalue_uint64 = (TEST_VAL_42 * 100); + unsigned long long test_data_uint64[arraylen]; + unsigned long long test_data_uint64_in[arraylen]; +#endif /* _NETCDF4 */ int ret; /* Return code. */ /* Initialize some data. */ for (int f = 0; f < arraylen; f++) { + test_data_byte[f] = my_rank * 10 + f; + test_data_char[f] = my_rank * 10 + f; + test_data_short[f] = my_rank * 10 + f; test_data_int[f] = my_rank * 10 + f; test_data_float[f] = my_rank * 10 + f + 0.5; test_data_double[f] = my_rank * 100000 + f + 0.5; +#ifdef _NETCDF4 + test_data_ubyte[f] = my_rank * 10 + f; + test_data_ushort[f] = my_rank * 10 + f; + test_data_uint[f] = my_rank * 10 + f; + test_data_int64[f] = my_rank * 10 + f; + test_data_uint64[f] = my_rank * 10 + f; +#endif /* _NETCDF4 */ } /* Select the fill value and data. */ switch (pio_type) { + case PIO_BYTE: + fillvalue = use_default ? &fillvalue_byte : &custom_fillvalue_byte; + test_data = test_data_byte; + test_data_in = test_data_byte_in; + break; + case PIO_CHAR: + fillvalue = use_default ? &fillvalue_char : &custom_fillvalue_char; + test_data = test_data_char; + test_data_in = test_data_char_in; + break; + case PIO_SHORT: + fillvalue = use_default ? &fillvalue_short : &custom_fillvalue_short; + test_data = test_data_short; + test_data_in = test_data_short_in; + break; case PIO_INT: fillvalue = use_default ? &fillvalue_int : &custom_fillvalue_int; test_data = test_data_int; @@ -124,6 +184,33 @@ int test_3_empty(int iosysid, int ioid, int num_flavors, int *flavor, int my_ran test_data = test_data_double; test_data_in = test_data_double_in; break; +#ifdef _NETCDF4 + case PIO_UBYTE: + fillvalue = use_default ? 
&fillvalue_ubyte : &custom_fillvalue_ubyte; + test_data = test_data_ubyte; + test_data_in = test_data_ubyte_in; + break; + case PIO_USHORT: + fillvalue = use_default ? &fillvalue_ushort : &custom_fillvalue_ushort; + test_data = test_data_ushort; + test_data_in = test_data_ushort_in; + break; + case PIO_UINT: + fillvalue = use_default ? &fillvalue_uint : &custom_fillvalue_uint; + test_data = test_data_uint; + test_data_in = test_data_uint_in; + break; + case PIO_INT64: + fillvalue = use_default ? &fillvalue_int64 : &custom_fillvalue_int64; + test_data = test_data_int64; + test_data_in = test_data_int64_in; + break; + case PIO_UINT64: + fillvalue = use_default ? &fillvalue_uint64 : &custom_fillvalue_uint64; + test_data = test_data_uint64; + test_data_in = test_data_uint64_in; + break; +#endif /* _NETCDF4 */ default: ERR(ERR_WRONG); } @@ -168,122 +255,226 @@ int test_3_empty(int iosysid, int ioid, int num_flavors, int *flavor, int my_ran #endif /* _PNETCDF */ } - /* Use PIO to create the example file in each of the four - * available ways. */ - for (int fmt = 0; fmt < num_flavors; fmt++) + for (int tc = 0; tc < NUM_MULTIVAR_TEST_CASES; tc++) { - /* Create the filename. */ - sprintf(filename, "data_%s_iotype_%d_pio_type_%d_use_fill_%d_default_fill_%d.nc", - TEST_NAME, flavor[fmt], pio_type, use_fill, use_default); - - /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, - flavor[fmt], pio_type); - if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) - ERR(ret); - - /* Turn on fill mode if desired. */ - if (use_fill) - if ((ret = PIOc_set_fill(ncid, NC_FILL, NULL))) - ERR(ret); - - /* Define netCDF dimensions and variable. */ - printf("%d Defining netCDF metadata...\n", my_rank); - for (int d = 0; d < NDIM; d++) - if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) - ERR(ret); - - /* Define the variables. */ - for (int v = 0; v < NUM_VAR; v++) + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) { - if ((ret = PIOc_def_var(ncid, var_name[v], pio_type, NDIM, dimids, &varid[v]))) + /* BYTE and CHAR don't work with pnetcdf. Don't know why yet. */ + if (flavor[fmt] == PIO_IOTYPE_PNETCDF && (pio_type == PIO_BYTE || pio_type == PIO_CHAR)) + continue; + + /* NetCDF-4 types only work with netCDF-4 formats. */ + printf("pio_type = %d flavor[fmt] = %d\n", pio_type, flavor[fmt]); + if (pio_type > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C && + flavor[fmt] != PIO_IOTYPE_NETCDF4P) + continue; + + /* Create the filename. */ + sprintf(filename, "data_%s_iotype_%d_tc_%d_pio_type_%d_use_fill_%d_default_fill_%d.nc", + TEST_NAME, flavor[fmt], tc, pio_type, use_fill, use_default); + + /* Create the netCDF output file. */ + printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, + flavor[fmt], pio_type); + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) ERR(ret); - if (use_fill && !use_default) - if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, fillvalue))) - ERR(ret); - } - /* End define mode. */ - if ((ret = PIOc_enddef(ncid))) - ERR(ret); + /* Turn on fill mode if desired. */ + if (use_fill) + if ((ret = PIOc_set_fill(ncid, NC_FILL, NULL))) + ERR(ret); - /* Set the value of the record dimension. */ - if ((ret = PIOc_setframe(ncid, varid[0], 0))) - ERR(ret); + /* Define netCDF dimensions and variable. 
*/ + printf("%d Defining netCDF metadata...\n", my_rank); + for (int d = 0; d < NDIM; d++) + if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) + ERR(ret); - /* Write the data. */ - if ((ret = PIOc_write_darray(ncid, varid[0], ioid, arraylen, test_data, fillvalue))) - ERR(ret); + /* Define the variables. */ + for (int v = 0; v < NUM_VAR; v++) + { + if ((ret = PIOc_def_var(ncid, var_name[v], pio_type, NDIM, dimids, &varid[v]))) + ERR(ret); + if (use_fill && !use_default) + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, fillvalue))) + ERR(ret); + } - /* Close the netCDF file. */ - if ((ret = PIOc_closefile(ncid))) - ERR(ret); + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); - /* Reopen the file. */ - if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) - ERR(ret); + /* Set the value of the record dimension. */ + for (int v = 0; v < NUM_VAR; v++) + { + if ((ret = PIOc_setframe(ncid, varid[v], 0))) + ERR(ret); - /* Read the data. */ - if ((ret = PIOc_read_darray(ncid2, varid[0], ioid, arraylen, test_data_in))) - ERR(ret); + /* For the first test case we just write the first variable. */ + if (tc == 0) + break; + } - /* Check the results. */ - for (int f = 0; f < arraylen; f++) - { - switch (pio_type) + /* Write the data. */ + for (int v = 0; v < NUM_VAR; v++) { - case PIO_INT: - if (test_data_int_in[f] != test_data_int[f]) - return ERR_WRONG; - break; - case PIO_FLOAT: - if (test_data_float_in[f] != test_data_float[f]) - return ERR_WRONG; - break; - case PIO_DOUBLE: - if (test_data_double_in[f] != test_data_double[f]) - return ERR_WRONG; - break; - default: - ERR(ERR_WRONG); + if ((ret = PIOc_write_darray(ncid, varid[v], ioid, arraylen, test_data, fillvalue))) + ERR(ret); + + /* For the first test case we just write the first variable. */ + if (tc == 0) + break; } - } - /* If fill mode is in use the other vars should have fill values. */ - if (use_fill && flavor[fmt] != PIO_IOTYPE_PNETCDF) - { - /* Read the data. */ - if ((ret = PIOc_read_darray(ncid2, varid[1], ioid, arraylen, test_data_in))) + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid))) ERR(ret); - - /* Check the results. */ - for (int f = 0; f < arraylen; f++) + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) + ERR(ret); + + for (int v = 0; v < NUM_VAR; v++) { - switch (pio_type) + /* Read the data. */ + if ((ret = PIOc_read_darray(ncid2, varid[0], ioid, arraylen, test_data_in))) + ERR(ret); + + /* Check the results. */ + for (int f = 0; f < arraylen; f++) { - case PIO_INT: - if (test_data_int_in[f] != (use_default ? NC_FILL_INT : custom_fillvalue_int)) - return ERR_WRONG; - break; - case PIO_FLOAT: - if (test_data_float_in[f] != (use_default ? NC_FILL_FLOAT : custom_fillvalue_float)) - return ERR_WRONG; - break; - case PIO_DOUBLE: - if (test_data_double_in[f] != (use_default ? 
NC_FILL_DOUBLE : custom_fillvalue_double)) - return ERR_WRONG; + switch (pio_type) + { + case PIO_BYTE: + if (test_data_byte_in[f] != test_data_byte[f]) + return ERR_WRONG; + break; + case PIO_CHAR: + if (test_data_char_in[f] != test_data_char[f]) + return ERR_WRONG; + break; + case PIO_SHORT: + if (test_data_short_in[f] != test_data_short[f]) + return ERR_WRONG; + break; + case PIO_INT: + if (test_data_int_in[f] != test_data_int[f]) + return ERR_WRONG; + break; + case PIO_FLOAT: + if (test_data_float_in[f] != test_data_float[f]) + return ERR_WRONG; + break; + case PIO_DOUBLE: + if (test_data_double_in[f] != test_data_double[f]) + return ERR_WRONG; + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (test_data_ubyte_in[f] != test_data_ubyte[f]) + return ERR_WRONG; + break; + case PIO_USHORT: + if (test_data_ushort_in[f] != test_data_ushort[f]) + return ERR_WRONG; + break; + case PIO_UINT: + if (test_data_uint_in[f] != test_data_uint[f]) + return ERR_WRONG; + break; + case PIO_INT64: + if (test_data_int64_in[f] != test_data_int64[f]) + return ERR_WRONG; + break; + case PIO_UINT64: + if (test_data_uint64_in[f] != test_data_uint64[f]) + return ERR_WRONG; + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } + } + + /* For the first test case we just write the first variable. */ + if (tc == 0) break; - default: - ERR(ERR_WRONG); + } /* next var */ + + /* For the first test case, if fill mode is in use the other vars + * should have fill values. */ + if (tc == 0 && use_fill && flavor[fmt] != PIO_IOTYPE_PNETCDF) + { + /* Read the data. */ + if ((ret = PIOc_read_darray(ncid2, varid[1], ioid, arraylen, test_data_in))) + ERR(ret); + + /* Check the results. */ + for (int f = 0; f < arraylen; f++) + { + switch (pio_type) + { + case PIO_BYTE: + if (test_data_byte_in[f] != (use_default ? NC_FILL_BYTE : custom_fillvalue_byte)) + return ERR_WRONG; + break; + case PIO_CHAR: + if (test_data_char_in[f] != (use_default ? NC_FILL_CHAR : custom_fillvalue_char)) + return ERR_WRONG; + break; + case PIO_SHORT: + if (test_data_short_in[f] != (use_default ? NC_FILL_SHORT : custom_fillvalue_short)) + return ERR_WRONG; + break; + case PIO_INT: + if (test_data_int_in[f] != (use_default ? NC_FILL_INT : custom_fillvalue_int)) + return ERR_WRONG; + break; + case PIO_FLOAT: + if (test_data_float_in[f] != (use_default ? NC_FILL_FLOAT : custom_fillvalue_float)) + return ERR_WRONG; + break; + case PIO_DOUBLE: + if (test_data_double_in[f] != (use_default ? NC_FILL_DOUBLE : custom_fillvalue_double)) + return ERR_WRONG; + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (test_data_ubyte_in[f] != (use_default ? NC_FILL_UBYTE : custom_fillvalue_ubyte)) + return ERR_WRONG; + break; + case PIO_USHORT: + if (test_data_ushort_in[f] != (use_default ? NC_FILL_USHORT : custom_fillvalue_ushort)) + return ERR_WRONG; + break; + case PIO_UINT: + if (test_data_uint_in[f] != (use_default ? NC_FILL_UINT : custom_fillvalue_uint)) + return ERR_WRONG; + break; + case PIO_INT64: + if (test_data_int64_in[f] != (use_default ? NC_FILL_INT64 : custom_fillvalue_int64)) + return ERR_WRONG; + break; + case PIO_UINT64: + if (test_data_uint64_in[f] != (use_default ? NC_FILL_UINT64 : custom_fillvalue_uint64)) + return ERR_WRONG; + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } } } - } - /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); - if ((ret = PIOc_closefile(ncid2))) - ERR(ret); - } + /* Close the netCDF file. 
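For context on the checks above (illustrative note, not part of the patch): in test case 0 only varid[0] is written, so when fill mode is on every element of the unwritten varid[1] should read back as the fill value, the netCDF default (NC_FILL_INT, NC_FILL_FLOAT, ...) when use_default is set, otherwise the custom value installed with PIOc_def_var_fill(). A minimal sketch of that check for the PIO_INT case, assuming the surrounding test's ncid2, varid, ioid, arraylen, use_fill, use_default and custom_fillvalue_int (the test skips this check for the pnetcdf iotype):

    /* Expected fill value for the PIO_INT case. */
    int expected_fill = use_default ? NC_FILL_INT : custom_fillvalue_int;
    int fill_check[arraylen];
    int ret;

    /* Read back the variable that was never written... */
    if ((ret = PIOc_read_darray(ncid2, varid[1], ioid, arraylen, fill_check)))
        ERR(ret);

    /* ...and confirm every element came back as the fill value. */
    if (use_fill)
        for (int f = 0; f < arraylen; f++)
            if (fill_check[f] != expected_fill)
                return ERR_WRONG;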
*/ + printf("%d Closing the sample data file...\n", my_rank); + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + } + } /* next test case */ + return PIO_NOERR; } @@ -302,8 +493,14 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm test_comm, int rearranger) { #define NUM_FILL_TESTS 3 -#define NUM_TYPES_TO_TEST 3 - int pio_type[NUM_TYPES_TO_TEST] = {PIO_INT, PIO_FLOAT, PIO_DOUBLE}; +#ifdef _NETCDF4 +#define NUM_TYPES_TO_TEST 11 + int test_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, + PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64}; +#else +#define NUM_TYPES_TO_TEST 6 + int test_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE}; +#endif /* _NETCDF4 */ int ioid; int dim_len_2d[NDIM2] = {X_DIM_LEN, Y_DIM_LEN}; int ret; /* Return code. */ @@ -315,7 +512,7 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, /* Decompose the data over the tasks. */ if ((ret = create_decomposition_2d(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, - &ioid, pio_type[t]))) + &ioid, test_type[t]))) return ret; /* Run the different combinations of use_fill and use_default. */ @@ -327,9 +524,9 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, if (f == 2) use_default++; - /* Run a simple darray test. */ - if ((ret = test_3_empty(iosysid, ioid, num_flavors, flavor, my_rank, pio_type[t], - test_comm, rearranger, use_fill, use_default))) + /* Run the multivar darray tests. */ + if ((ret = test_multivar_darray(iosysid, ioid, num_flavors, flavor, my_rank, test_type[t], + test_comm, rearranger, use_fill, use_default))) return ret; } diff --git a/src/externals/pio2/tests/cunit/test_darray_multivar2.c b/src/externals/pio2/tests/cunit/test_darray_multivar2.c new file mode 100644 index 000000000000..b25ba890ab7d --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_darray_multivar2.c @@ -0,0 +1,287 @@ +/* + * Tests for PIO distributed arrays. + * + * Ed Hartnett, Jim Edwards, 4/20/17 + */ +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 4 + +/* The name of this test. */ +#define TEST_NAME "test_darray_multivar2" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 1 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 1 + +/* The number of dimensions in the example data. In this test, we + * are using three-dimensional data. */ +#define NDIM 3 + +/* But sometimes we need arrays of the non-record dimensions. */ +#define NDIM2 2 + +/* The length of our sample data along each dimension. */ +#define X_DIM_LEN 4 +#define Y_DIM_LEN 4 + +/* The number of timesteps of data to write. */ +#define NUM_TIMESTEPS 2 + +/* Number of variables in the test file. */ +#define NUM_VAR 2 + +/* The dimension names. */ +char dim_name[NDIM][PIO_MAX_NAME + 1] = {"timestep", "x", "y"}; + +/* The var names. */ +char var_name[NUM_VAR][PIO_MAX_NAME + 1] = {"Aubery", "Martin"}; + +/* Length of the dimensions in the sample data. */ +int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; + +/** + * Test the darray functionality. Create a netCDF file with 3 + * dimensions and 2 variables. One of the vars uses the record + * dimension, the other does not. Then use darray to write to them. + * + * @param iosysid the IO system ID. + * @param ioid the ID of the decomposition. 
+ * @param num_flavors the number of IOTYPES available in this build. + * @param flavor array of available iotypes. + * @param my_rank rank of this task. + * @param pio_type the type of the data. + * @param test_comm the communicator that is running this test. + * @returns 0 for success, error code otherwise. +*/ +int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, + int my_rank, int pio_type, MPI_Comm test_comm) +{ + char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ + int dimids[NDIM]; /* The dimension IDs. */ + int ncid; /* The ncid of the netCDF file. */ + int varid[NUM_VAR]; /* The IDs of the netCDF varables. */ + PIO_Offset arraylen = 4; + int custom_fillvalue_int = -TEST_VAL_42; + int test_data_int[arraylen]; + int ret; /* Return code. */ + + /* Initialize some data. */ + for (int f = 0; f < arraylen; f++) + test_data_int[f] = my_rank * 10 + f; + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + /* Create the filename. */ + sprintf(filename, "data_%s_iotype_%d_pio_type_%d.nc", TEST_NAME, flavor[fmt], pio_type); + + /* Create the netCDF output file. */ + printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, + flavor[fmt], pio_type); + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) + ERR(ret); + + /* Define netCDF dimensions and variable. */ + printf("%d Defining netCDF metadata...\n", my_rank); + for (int d = 0; d < NDIM; d++) + if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) + ERR(ret); + + /* Var 0 does not have a record dim, varid 1 is a record var. */ + if ((ret = PIOc_def_var(ncid, var_name[0], pio_type, NDIM - 1, &dimids[1], &varid[0]))) + ERR(ret); + if ((ret = PIOc_def_var(ncid, var_name[1], pio_type, NDIM, dimids, &varid[1]))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Set the value of the record dimension for varid 1. */ + if ((ret = PIOc_setframe(ncid, varid[1], 0))) + ERR(ret); + + /* Write the data. */ + for (int v = 0; v < NUM_VAR; v++) + if ((ret = PIOc_write_darray(ncid, varid[v], ioid, arraylen, test_data_int, &custom_fillvalue_int))) + ERR(ret); + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Check the file contents. */ + { + int ncid2; /* The ncid of the re-opened netCDF file. */ + int test_data_int_in[arraylen]; + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) + ERR(ret); + + for (int v = 0; v < NUM_VAR; v++) + { + /* Read the data. */ + if ((ret = PIOc_read_darray(ncid2, varid[v], ioid, arraylen, test_data_int_in))) + ERR(ret); + + /* Check the results. */ + for (int f = 0; f < arraylen; f++) + if (test_data_int_in[f] != test_data_int[f]) + return ERR_WRONG; + } /* next var */ + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + } + } + + return PIO_NOERR; +} + +/* Create the decomposition to divide the 3-dimensional sample data + * between the 4 tasks. For the purposes of decomposition we are only + * concerned with 2 dimensions - we ignore the unlimited dimension. + * + * @param ntasks the number of available tasks + * @param my_rank rank of this task. + * @param iosysid the IO system ID. + * @param dim_len_2d an array of length 2 with the dim lengths. + * @param ioid a pointer that gets the ID of this decomposition. 
+ * @param pio_type the data type to use for the decomposition. + * @returns 0 for success, error code otherwise. + **/ +int create_decomposition_2d_2(int ntasks, int my_rank, int iosysid, int *dim_len_2d, + int *ioid, int pio_type) +{ + PIO_Offset elements_per_pe; /* Array elements per processing unit. */ + PIO_Offset *compdof; /* The decomposition mapping. */ + int ret; + + /* How many data elements per task? In this example we will end up + * with 4. */ + elements_per_pe = dim_len_2d[0] * dim_len_2d[1] / ntasks; + + /* Allocate space for the decomposition array. */ + if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + + /* Describe the decomposition. This is a 1-based array, so add 1! */ + for (int i = 0; i < elements_per_pe; i++) + compdof[i] = my_rank * elements_per_pe + i + 1; + + /* Create the PIO decomposition for this test. */ + printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); + if ((ret = PIOc_InitDecomp(iosysid, pio_type, NDIM2, dim_len_2d, elements_per_pe, + compdof, ioid, NULL, NULL, NULL))) + ERR(ret); + + printf("%d decomposition initialized.\n", my_rank); + + /* Free the mapping. */ + free(compdof); + + return 0; +} + +/** + * Run all the tests. + * + * @param iosysid the IO system ID. + * @param num_flavors number of available iotypes in the build. + * @param flavor pointer to array of the available iotypes. + * @param my_rank rank of this task. + * @param test_comm the communicator the test is running on. + * @returns 0 for success, error code otherwise. + */ +int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, + MPI_Comm test_comm) +{ + int ioid; + int dim_len_2d[NDIM2] = {X_DIM_LEN, Y_DIM_LEN}; + int ret; /* Return code. */ + + /* Decompose the data over the tasks. */ + if ((ret = create_decomposition_2d_2(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, + &ioid, PIO_INT))) + return ret; + + /* Run the multivar darray tests. */ + if ((ret = test_multivar_darray(iosysid, ioid, num_flavors, flavor, my_rank, PIO_INT, + test_comm))) + return ret; + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + return PIO_NOERR; +} + +/* Run tests for darray functions. */ +int main(int argc, char **argv) +{ + int my_rank; + int ntasks; + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, MIN_NTASKS, + 3, &test_comm))) + ERR(ERR_INIT); + + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Only do something on max_ntasks tasks. */ + if (my_rank < TARGET_NTASKS) + { + int iosysid; /* The ID for the parallel I/O system. */ + int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ + int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ + int ret; /* Return code. */ + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + printf("Runnings tests for %d flavors\n", num_flavors); + + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, + ioproc_start, PIO_REARR_BOX, &iosysid))) + return ret; + + /* Run tests. 
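A standalone sketch (not part of the patch) of the block map that create_decomposition_2d_2() builds: each of the 4 tasks gets a contiguous quarter of the 4 x 4 grid, and because PIOc_InitDecomp() expects a 1-based map, 1 is added to every offset. Compiling and running this prints offsets 1-4 for rank 0, 5-8 for rank 1, and so on:

    #include <stdio.h>

    /* Print the 1-based block map each rank would hand to PIOc_InitDecomp()
     * for a 4 x 4 grid split over 4 tasks, as in create_decomposition_2d_2(). */
    int main(void)
    {
        int ntasks = 4;
        int dim_len_2d[2] = {4, 4};
        int elements_per_pe = dim_len_2d[0] * dim_len_2d[1] / ntasks; /* 4 */

        for (int my_rank = 0; my_rank < ntasks; my_rank++)
        {
            printf("rank %d compdof:", my_rank);
            for (int i = 0; i < elements_per_pe; i++)
                printf(" %d", my_rank * elements_per_pe + i + 1); /* 1-based */
            printf("\n");
        }
        return 0;
    }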
*/ + printf("%d Running tests...\n", my_rank); + if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm))) + return ret; + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + printf("%d %s Finalizing...\n", my_rank, TEST_NAME); + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_decomp_uneven.c b/src/externals/pio2/tests/cunit/test_decomp_uneven.c new file mode 100644 index 000000000000..b6d47004eff1 --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_decomp_uneven.c @@ -0,0 +1,380 @@ +/* + * Tests for PIO distributed arrays. This tests cases when arrays do + * not distribute evenly over the processors. + * + * Ed Hartnett, 3/6/17 + */ +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 4 + +/* The name of this test. */ +#define TEST_NAME "test_darray_uneven" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 1 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 1 + +/* This is for 3D data decompositions. */ +#define NDIM3 3 + +/* Create the decomposition to divide the 4-dimensional sample data + * between the 4 tasks. For the purposes of decomposition we are only + * concerned with 3 dimensions - we ignore the unlimited dimension. + * + * @param ntasks the number of available tasks + * @param my_rank rank of this task. + * @param iosysid the IO system ID. + * @param dim_len an array of length 3 with the dimension sizes. + * @param pio_type the type for this decomposition. + * @param ioid a pointer that gets the ID of this decomposition. + * lengths. + * @returns 0 for success, error code otherwise. + **/ +int create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *dim_len, + int pio_type, int *ioid) +{ + PIO_Offset elements_per_pe; /* Array elements per processing unit. */ + PIO_Offset remainder; /* Left over array elements. */ + PIO_Offset *compdof; /* The decomposition mapping. */ + PIO_Offset data_size = 1; + int ret; + + /* How many data elements per task? In this example we will end up + * with 4. */ + for (int d = 0; d < NDIM3; d++) + data_size *= dim_len[d]; + elements_per_pe = data_size / ntasks; + remainder = data_size % ntasks; + + /* Distribute the remaining elements. */ + if (my_rank < remainder) + elements_per_pe++; + printf("%d elements_per_pe = %lld remainder = %lld\n", my_rank, elements_per_pe, remainder); + + /* Allocate space for the decomposition array. */ + if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + + /* Describe the decomposition. */ + for (int i = 0; i < elements_per_pe; i++) + { + int my_remainder = 0; + if (my_rank >= remainder) + my_remainder = remainder; + compdof[i] = my_rank * elements_per_pe + i + my_remainder; + printf("%d my_remainder = %d compdof[%d] = %lld\n", my_rank, i, my_remainder, compdof[i]); + } + + /* Create the PIO decomposition for this test. */ + printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); + if ((ret = PIOc_init_decomp(iosysid, pio_type, NDIM3, dim_len, elements_per_pe, + compdof, ioid, 0, NULL, NULL))) + ERR(ret); + + printf("%d decomposition initialized.\n", my_rank); + + /* Free the mapping. 
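A standalone sketch (not part of the patch) of the uneven split done by create_decomposition_3d(): the leftover elements go to the lowest-ranked tasks, and higher ranks shift their starting offsets by the full remainder. For the 1 x 3 x 3 case this reproduces the expected maplens {3, 2, 2, 2} and the offsets 0-2, 3-4, 5-6, 7-8 that appear later in expected_map:

    #include <stdio.h>

    /* Reproduce the uneven split of create_decomposition_3d() for a
     * 1 x 3 x 3 grid over 4 tasks: 9 / 4 = 2 elements each with 1 left
     * over, and the leftover element goes to rank 0. */
    int main(void)
    {
        int ntasks = 4;
        int dim_len[3] = {1, 3, 3};
        long long data_size = (long long)dim_len[0] * dim_len[1] * dim_len[2];
        long long remainder = data_size % ntasks;

        for (int my_rank = 0; my_rank < ntasks; my_rank++)
        {
            long long elements_per_pe = data_size / ntasks + (my_rank < remainder ? 1 : 0);
            long long my_remainder = (my_rank >= remainder) ? remainder : 0;

            printf("rank %d maplen %lld map:", my_rank, elements_per_pe);
            for (long long i = 0; i < elements_per_pe; i++)
                printf(" %lld", my_rank * elements_per_pe + i + my_remainder);
            printf("\n");
        }
        return 0;
    }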
*/ + free(compdof); + + return 0; +} + +/** + * Test the decomp read/write functionality. Given an ioid for a 3D + * decomposition, this function will write a decomp file, then read it + * in to ensure the correct values are read. + * + * @param iosysid the IO system ID. + * @param ioid the ID of the decomposition. + * @param num_flavors the number of IOTYPES available in this build. + * @param flavor array of available iotypes. + * @param my_rank rank of this task. + * @param rearranger the rearranger to use (PIO_REARR_BOX or + * PIO_REARR_SUBSET). + * @param test_comm the MPI communicator for this test. + * @param dim_len array of length 3 with dim lengths. + * @param expected_maplen pointer to array of length TARGET_NTASKS + * with the maplen we expect to get for each of the tasks running this + * test. + * @param pio_type the type we expect to be associated with + * this decomposition. + * @param full_maplen the length of the full map. + * @param pointer to expected map, an array of TARGET_NTASKS * + * max_maplen. + * @returns 0 for success, error code otherwise. +*/ +int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, + int rearranger, MPI_Comm test_comm, int *dim_len, int *expected_maplen, + int pio_type, int fill_maplen, int *expected_map) +{ +#define TEST_DECOMP_TITLE "Decomposition data for test_darray_uneven.c in PIO library." +#define TEST_DECOMP_HISTORY "This file may be deleted; it is for test purposes only: " + int ioid2; /* ID for decomp we read into. */ + char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ + char title[] = TEST_DECOMP_TITLE; + char history[PIO_MAX_NAME + 1] = TEST_DECOMP_HISTORY; + char title_in[PIO_MAX_NAME + 1]; + char history_in[PIO_MAX_NAME + 1]; + int fortran_order_in; /* Indicates fortran vs. c order. */ + int ret; /* Return code. */ + + /* Use PIO to create the decomp file in one of the four + * available ways. */ + for (int fmt = 0; fmt < 1; fmt++) + { + /* Create the filename. */ + sprintf(filename, "decomp_%s_pio_type_%d_dims_%d_x_%d_x_%d.nc", TEST_NAME, pio_type, + dim_len[0], dim_len[1], dim_len[2]); + + /* Create history string. */ + strncat(history, filename, NC_MAX_NAME - strlen(TEST_DECOMP_HISTORY)); + + printf("writing decomp file %s\n", filename); + if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, title, history, 0))) + return ret; + printf("about to check map with netCDF\n"); + + /* Open the decomposition file with netCDF. */ + int ncid_in; + int iotype = PIO_IOTYPE_NETCDF; + if ((ret = PIOc_openfile(iosysid, &ncid_in, &iotype, filename, NC_NOWRITE))) + return ret; + + /* Get the max maplen. */ + int max_maplen; + if ((ret = PIOc_get_att_int(ncid_in, NC_GLOBAL, DECOMP_MAX_MAPLEN_ATT_NAME, &max_maplen))) + return ret; + printf("max_maplen = %d\n", max_maplen); + + /* Check dims. */ + PIO_Offset ndims_in; + if ((ret = PIOc_inq_dim(ncid_in, 0, NULL, &ndims_in))) + return ret; + if (ndims_in != NDIM3) + return ERR_WRONG; + PIO_Offset ntasks_in; + if ((ret = PIOc_inq_dim(ncid_in, 1, NULL, &ntasks_in))) + return ret; + if (ntasks_in != TARGET_NTASKS) + return ERR_WRONG; + + /* Check the maplen. 
*/ + int maplen_varid; + int maplen_in[TARGET_NTASKS]; + if ((ret = PIOc_inq_varid(ncid_in, DECOMP_MAPLEN_VAR_NAME, &maplen_varid))) + return ret; + if ((ret = PIOc_get_var(ncid_in, maplen_varid, &maplen_in))) + return ret; + for (int t = 0; t < TARGET_NTASKS; t++) + { + printf("%d maplen_in[%d] = %d expected_maplen[%d] = %d\n", my_rank, t, maplen_in[t], t, expected_maplen[t]); + if (maplen_in[t] != expected_maplen[t]) + return ERR_WRONG; + } + + /* Check the map. */ + int map_varid; + int map_in[TARGET_NTASKS][max_maplen]; + if ((ret = PIOc_inq_varid(ncid_in, DECOMP_MAP_VAR_NAME, &map_varid))) + return ret; + if ((ret = PIOc_get_var(ncid_in, map_varid, (int *)&map_in))) + return ret; + printf("about to check map\n"); + for (int t = 0; t < TARGET_NTASKS; t++) + { + for (int e = 0; e < max_maplen; e++) + { + printf("%d t = %d e = %d map_in[t][e] = %d expected_map[t * max_maplen + e] = %d\n", + my_rank, t, e, map_in[t][e], expected_map[t * max_maplen + e]); + if (map_in[t][e] != expected_map[t * max_maplen + e]) + return ERR_WRONG; + } + } + + /* Close the decomposition file. */ + if ((ret = PIOc_closefile(ncid_in))) + return ret; + + /* Read the decomposition file into PIO. */ + printf("reading decomp file %s\n", filename); + if ((ret = PIOc_read_nc_decomp(iosysid, filename, &ioid2, test_comm, pio_type, + title_in, history_in, &fortran_order_in))) + return ret; + + /* Check the results. */ + { + iosystem_desc_t *ios; + io_desc_t *iodesc; + + /* Get the IO system info. */ + if (!(ios = pio_get_iosystem_from_id(iosysid))) + return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); + + /* Get the IO desc, which describes the decomposition. */ + if (!(iodesc = pio_get_iodesc_from_id(ioid2))) + return pio_err(ios, NULL, PIO_EBADID, __FILE__, __LINE__); + + /* We need to find the MPI type we will expect to see in + * iodesc. */ + MPI_Datatype expected_mpi_type; + if ((ret = find_mpi_type(pio_type, &expected_mpi_type, NULL))) + return ret; + + /* Check values in iodesc. */ + printf("ioid2 = %d iodesc->ioid = %d iodesc->maplen = %d iodesc->ndims = %d " + "iodesc->ndof = %d iodesc->rearranger = %d iodesc->maxregions = %d " + "iodesc->needsfill = %d iodesc->basetype = %d expected_mpi_type = %d\n", + ioid2, iodesc->ioid, iodesc->maplen, iodesc->ndims, iodesc->ndof, + iodesc->rearranger, iodesc->maxregions, iodesc->needsfill, iodesc->basetype, + expected_mpi_type); + if (strcmp(title, title_in) || strcmp(history, history_in)) + return ERR_WRONG; + if (iodesc->ioid != ioid2 || iodesc->rearranger != rearranger || + iodesc->basetype != expected_mpi_type) + return ERR_WRONG; + if (iodesc->ndims != NDIM3) + return ERR_WRONG; + if (iodesc->maplen != expected_maplen[my_rank]) + return ERR_WRONG; + if (iodesc->ndims != NDIM3 || iodesc->ndof != expected_maplen[my_rank]) + return ERR_WRONG; + if (iodesc->needsfill) + return ERR_WRONG; + /* Don't forget to add 1! */ + for (int e = 0; e < iodesc->maplen; e++) + { + printf("%d e = %d max_maplen = %d iodesc->map[e] = %lld expected_map[my_rank * max_maplen + e] = %d\n", + my_rank, e, max_maplen, iodesc->map[e], expected_map[my_rank * max_maplen + e]); + if (iodesc->map[e] != expected_map[my_rank * max_maplen + e] + 1) + return ERR_WRONG; + } + for (int d = 0; d < NDIM3; d++) + if (iodesc->dimlen[d] != dim_len[d]) + return ERR_WRONG; + } + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid2))) + ERR(ret); + } + return PIO_NOERR; +} + +/* Run tests for darray functions. 
*/ +int main(int argc, char **argv) +{ +/* #define NUM_TYPES_TO_TEST 3 */ +/* int test_type[NUM_TYPES_TO_TEST] = {PIO_INT, PIO_FLOAT, PIO_DOUBLE}; */ +#define NUM_TYPES_TO_TEST 1 + int test_type[NUM_TYPES_TO_TEST] = {PIO_INT}; + int my_rank; + int ntasks; + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + MIN_NTASKS, 3, &test_comm))) + ERR(ERR_INIT); + + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Only do something on max_ntasks tasks. */ + if (my_rank < TARGET_NTASKS) + { +#define NUM_REARRANGERS_TO_TEST 2 + int rearranger[NUM_REARRANGERS_TO_TEST] = {PIO_REARR_BOX, PIO_REARR_SUBSET}; + int iosysid; /* The ID for the parallel I/O system. */ + int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ + int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ + int map_1x4x4[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; + int map_2x4x4[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; + int map_3x4x4[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47}; + int map_1x3x3[] = {0, 1, 2, 3, 4, PIO_FILL_INT, 5, 6, PIO_FILL_INT, 7, 8, PIO_FILL_INT}; + int map_1x2x3[] = {0, 1, 2, 3, 4, PIO_FILL_INT, 5, PIO_FILL_INT}; +#define NUM_DIM_COMBOS_TO_TEST 5 + int dim_len[NUM_DIM_COMBOS_TO_TEST][NDIM3] = {{1, 4, 4}, + {2, 4, 4}, + {3, 4, 4}, + {1, 3, 3}, + {1, 2, 3}}; + int expected_maplen[NUM_DIM_COMBOS_TO_TEST][TARGET_NTASKS] = {{4, 4, 4, 4}, + {8, 8, 8, 8}, + {12, 12, 12, 12}, + {3, 2, 2, 2}, + {2, 2, 1, 1}}; + int *expected_map[NUM_DIM_COMBOS_TO_TEST] = {map_1x4x4, map_2x4x4, map_3x4x4, map_1x3x3, map_1x2x3}; + int ret; /* Return code. */ + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + printf("Runnings tests for %d flavors\n", num_flavors); + + for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) + { + int ioid; /* Decomposition ID. */ + + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, + ioproc_start, rearranger[r], &iosysid))) + return ret; + + /* Run tests for each data type. */ + for (int t = 0; t < NUM_TYPES_TO_TEST; t++) + { + for (int dc = 0; dc < NUM_DIM_COMBOS_TO_TEST; dc++) + { + /* What is length of map for this combo? */ + int full_maplen = 1; + for (int d = 0; d < NDIM3; d++) + full_maplen *= dim_len[dc][d]; + + /* Decompose the data over the tasks. */ + if ((ret = create_decomposition_3d(TARGET_NTASKS, my_rank, iosysid, dim_len[dc], + test_type[t], &ioid))) + return ret; + + /* Test decomposition read/write. */ + if ((ret = test_decomp_read_write(iosysid, ioid, num_flavors, flavor, my_rank, + rearranger[r], test_comm, dim_len[dc], + expected_maplen[dc], test_type[t], full_maplen, + expected_map[dc]))) + return ret; + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + } + } + + /* Finalize PIO system. 
*/ + if ((ret = PIOc_finalize(iosysid))) + return ret; + + } /* next rearranger */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + printf("%d %s Finalizing...\n", my_rank, TEST_NAME); + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_decomps.c b/src/externals/pio2/tests/cunit/test_decomps.c index 032d10de7b0f..96dc1a386e20 100644 --- a/src/externals/pio2/tests/cunit/test_decomps.c +++ b/src/externals/pio2/tests/cunit/test_decomps.c @@ -290,8 +290,7 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, decomp_file_type); printf("writing decomp file %s\n", filename); - if ((ret = PIOc_write_nc_decomp(iosysid, filename, cmode, ioid, test_comm, NULL, - NULL, 0))) + if ((ret = PIOc_write_nc_decomp(iosysid, filename, cmode, ioid, NULL, NULL, 0))) return ret; /* Read the data. */ diff --git a/src/externals/pio2/tests/cunit/test_intercomm2.c b/src/externals/pio2/tests/cunit/test_intercomm2.c index 0a6f4a5b2ff0..e55c6e1ff2e8 100644 --- a/src/externals/pio2/tests/cunit/test_intercomm2.c +++ b/src/externals/pio2/tests/cunit/test_intercomm2.c @@ -253,7 +253,7 @@ int check_file(int iosysid, int format, char *filename, int my_rank) ERR(ERR_WRONG); if (PIOc_inq_att(ncid + TEST_VAL_42, NC_GLOBAL, too_long_name, &atttype, &attlen) != PIO_EBADID) ERR(ERR_WRONG); - if (PIOc_get_att(ncid + TEST_VAL_42, NC_GLOBAL, ATT_NAME, &att_data) != PIO_EBADID) + if (PIOc_get_att(ncid, NC_GLOBAL, TEST_NAME, &att_data) != PIO_ENOTATT) ERR(ERR_WRONG); if (PIOc_get_att(ncid, NC_GLOBAL, NULL, &att_data) != PIO_EINVAL) ERR(ERR_WRONG); @@ -317,7 +317,7 @@ int main(int argc, char **argv) ERR(ret); /* How many processors will be used for our IO and 2 computation components. */ - int num_procs[COMPONENT_COUNT + 1] = {2, 2}; + int num_procs[COMPONENT_COUNT] = {2}; /* Is the current process a computation task? */ int comp_task = my_rank < 2 ? 0 : 1; @@ -327,8 +327,8 @@ int main(int argc, char **argv) int my_comp_idx = comp_task ? 0 : -1; /* Initialize the IO system. 
*/ - if ((ret = PIOc_Init_Async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, - num_procs, NULL, NULL, NULL, iosysid))) + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, + num_procs, NULL, NULL, NULL, PIO_REARR_BOX, iosysid))) ERR(ERR_AWFUL); printf("%d: test_intercomm2 ParallelIO Library test_intercomm2 comp task returned.\n", @@ -517,7 +517,6 @@ int main(int argc, char **argv) for (int i = 0; i < DIM_LEN; i++) data[i] = i; printf("%d test_intercomm2 writing data\n", my_rank); - printf("%d test_intercomm2 writing data\n", my_rank); start[0] = 0; count[0] = DIM_LEN; if ((ret = PIOc_put_vars_tc(ncid, varid, start, count, NULL, NC_INT, data))) diff --git a/src/externals/pio2/tests/cunit/test_iosystem2_simple.c b/src/externals/pio2/tests/cunit/test_iosystem2_simple.c index 4cf217e72e53..66ae617372b1 100644 --- a/src/externals/pio2/tests/cunit/test_iosystem2_simple.c +++ b/src/externals/pio2/tests/cunit/test_iosystem2_simple.c @@ -97,6 +97,8 @@ int main(int argc, char **argv) ERR(ret); if (active) ERR(ERR_WRONG); + if ((ret = PIOc_iosystem_is_active(iosysid, NULL))) + ERR(ret); int numiotasks; if (PIOc_get_numiotasks(iosysid + TEST_VAL_42, &numiotasks) != PIO_EBADID) diff --git a/src/externals/pio2/tests/cunit/test_pioc.c b/src/externals/pio2/tests/cunit/test_pioc.c index 3cffd6454632..466ed99f7065 100644 --- a/src/externals/pio2/tests/cunit/test_pioc.c +++ b/src/externals/pio2/tests/cunit/test_pioc.c @@ -100,22 +100,23 @@ int create_decomposition(int ntasks, int my_rank, int iosysid, int dim1_len, int if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) return PIO_ENOMEM; - /* Describe the decomposition. This is a 1-based array, so add 1! */ + /* Describe the decomposition. The new init_decomp uses a 0-based + * array, so don't add 1! */ for (int i = 0; i < elements_per_pe; i++) - compdof[i] = my_rank * elements_per_pe + i + 1; + compdof[i] = my_rank * elements_per_pe + i; /* These should fail. */ - if (PIOc_InitDecomp(iosysid + TEST_VAL_42, PIO_FLOAT, NDIM1, dim_len, elements_per_pe, - compdof, ioid, NULL, NULL, NULL) != PIO_EBADID) + if (PIOc_init_decomp(iosysid + TEST_VAL_42, PIO_FLOAT, NDIM1, dim_len, elements_per_pe, + compdof, ioid, 0, NULL, NULL) != PIO_EBADID) ERR(ERR_WRONG); - if (PIOc_InitDecomp(iosysid, PIO_FLOAT, NDIM1, bad_dim_len, elements_per_pe, - compdof, ioid, NULL, NULL, NULL) != PIO_EINVAL) + if (PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM1, bad_dim_len, elements_per_pe, + compdof, ioid, 0, NULL, NULL) != PIO_EINVAL) ERR(ERR_WRONG); /* Create the PIO decomposition for this test. */ printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); - if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, NDIM1, dim_len, elements_per_pe, - compdof, ioid, NULL, NULL, NULL))) + if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM1, dim_len, elements_per_pe, + compdof, ioid, 0, NULL, NULL))) ERR(ret); printf("%d decomposition initialized.\n", my_rank); @@ -315,7 +316,7 @@ int check_var_name(int my_rank, int ncid, MPI_Comm test_comm) * @param flavor the iotype * @param test_comm the MPI communicator of the test. * @param async 1 if we are testing async, 0 otherwise. - * @returns 0 for success, error code otherwise. + * @returns 0 for success, error code otherwise. */ int check_atts(int my_rank, int ncid, int flavor, MPI_Comm test_comm, int async) { @@ -391,7 +392,7 @@ int check_atts(int my_rank, int ncid, int flavor, MPI_Comm test_comm, int async) ERR(ret); if (att_int_value2 != ATT_VAL) return ERR_WRONG; - + /* Check second att. 
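Worth spelling out (illustrative sketch, not part of the patch): the old PIOc_InitDecomp() takes a 1-based map, while the new PIOc_init_decomp() takes a 0-based map plus an extra rearranger argument (0 in create_decomposition() above, PIO_REARR_BOX elsewhere in these tests). Assuming the surrounding create_decomposition() variables (iosysid, dim_len, elements_per_pe, my_rank, NDIM1) and a local int ioid, the two calls describing the same decomposition look like this:

    PIO_Offset compdof_0based[elements_per_pe];
    PIO_Offset compdof_1based[elements_per_pe];
    int ioid;
    int ret;

    for (int i = 0; i < elements_per_pe; i++)
    {
        compdof_0based[i] = my_rank * elements_per_pe + i;     /* new PIOc_init_decomp() */
        compdof_1based[i] = my_rank * elements_per_pe + i + 1; /* old PIOc_InitDecomp()  */
    }

    /* New interface: 0-based map plus a rearranger argument. */
    if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM1, dim_len, elements_per_pe,
                                compdof_0based, &ioid, 0, NULL, NULL)))
        ERR(ret);

    /* Old interface: 1-based map (shown for comparison only). */
    /* if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, NDIM1, dim_len, elements_per_pe, */
    /*                            compdof_1based, &ioid, NULL, NULL, NULL)))           */
    /*     ERR(ret);                                                                   */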
*/ if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, ATT_NAME2, &att_type, &att_len))) return ret; @@ -494,7 +495,7 @@ int test_iotypes(int my_rank) /* This is never present. */ if (PIOc_iotype_available(1000)) return ERR_WRONG; - + /* NetCDF is always present. */ if (!PIOc_iotype_available(PIO_IOTYPE_NETCDF)) return ERR_WRONG; @@ -740,26 +741,26 @@ int define_metadata(int ncid, int my_rank, int flavor) return ERR_WRONG; if (PIOc_def_var_fill(ncid, varid, NC_FILL, NULL) != PIO_EINVAL) return ERR_WRONG; - + /* Set the fill value. */ if ((ret = PIOc_def_var_fill(ncid, varid, NC_FILL, &int_fill))) return ret; - + /* These should not work. */ if (PIOc_inq_var_fill(ncid + TEST_VAL_42, varid, &fill_mode, &int_fill_in) != PIO_EBADID) return ERR_WRONG; if (PIOc_inq_var_fill(ncid, varid + TEST_VAL_42, &fill_mode, &int_fill_in) != PIO_ENOTVAR) return ERR_WRONG; - + /* Check the fill value. */ if ((ret = PIOc_inq_var_fill(ncid, varid, &fill_mode, &int_fill_in))) return ret; if (fill_mode != NC_FILL || int_fill_in != int_fill) ERR(ERR_WRONG); - + /* These should also work. */ int_fill_in = 0; - + /* This does not work for pnetcdf, but probably should. */ if (flavor != PIO_IOTYPE_PNETCDF) { @@ -886,10 +887,14 @@ int test_names(int iosysid, int num_flavors, int *flavor, int my_rank, return ERR_WRONG; if (PIOc_setframe(ncid, -1, 0) != PIO_EINVAL) return ERR_WRONG; + if (PIOc_setframe(ncid, NC_MAX_VARS + 1, 0) != PIO_EINVAL) + return ERR_WRONG; if (PIOc_advanceframe(ncid + TEST_VAL_42, 0) != PIO_EBADID) return ERR_WRONG; if (PIOc_advanceframe(ncid, -1) != PIO_EINVAL) return ERR_WRONG; + if (PIOc_advanceframe(ncid, NC_MAX_VARS + 1) != PIO_EINVAL) + return ERR_WRONG; /* Check the dimension names. */ if ((ret = check_dim_names(my_rank, ncid, test_comm))) @@ -1398,6 +1403,171 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) return PIO_NOERR; } +/* This function is part of test_scalar(). It tests the contents of + * the scalar var. */ +int check_scalar_var(int ncid, int varid, int flavor) +{ + char var_name_in[PIO_MAX_NAME + 1]; + int var_type_in; + int ndims_in; + int natts_in; + int val_in; + int ret; + + /* Learn the var metadata. */ + if ((ret = PIOc_inq_var(ncid, varid, var_name_in, &var_type_in, &ndims_in, NULL, + &natts_in))) + return ret; + + /* Is the metadata correct? */ + if (strcmp(var_name_in, VAR_NAME) || var_type_in != PIO_INT || ndims_in != 0 || natts_in != 0) + return ERR_WRONG; + + /* Get the value. */ + if ((ret = PIOc_get_var_int(ncid, varid, &val_in))) + return ret; + printf("val_in = %d\n", val_in); + + /* Is the value correct? */ + if (val_in != TEST_VAL_42) + return ERR_WRONG; + + return 0; +} + +/* Test scalar vars. */ +int test_scalar(int iosysid, int num_flavors, int *flavor, int my_rank, int async, + MPI_Comm test_comm) +{ + int ncid; /* The ncid of the netCDF file. */ + int varid; /* The ID of the netCDF varable. */ + int ret; /* Return code. */ + + /* Use netCDF classic to create a file with a scalar var, then set + * and read the value. 
*/ + if (my_rank == 0) + { + char test_file[] = "netcdf_test.nc"; + int test_val = TEST_VAL_42; + int test_val_in; + + if ((ret = nc_create(test_file, NC_CLOBBER, &ncid))) + return ret; + if ((ret = nc_def_var(ncid, VAR_NAME, NC_INT, 0, NULL, &varid))) + return ret; + if ((ret = nc_enddef(ncid))) + return ret; + if ((ret = nc_put_var(ncid, varid, &test_val))) + return ret; + if ((ret = nc_close(ncid))) + return ret; + if ((ret = nc_open(test_file, NC_NOWRITE, &ncid))) + return ret; + /* if ((ret = nc_get_var(ncid, varid, &test_val_in))) */ + /* return ret; */ + /* if (test_val_in != test_val) */ + /* return ERR_WRONG; */ + if ((ret = nc_get_vars(ncid, varid, NULL, NULL, NULL, &test_val_in))) + return ret; + if (test_val_in != test_val) + return ERR_WRONG; + if ((ret = nc_close(ncid))) + return ret; + } + + /* Use pnetCDF to create a file with a scalar var, then set and + * read the value. */ +#ifdef _PNETCDF + { + char test_file[] = "pnetcdf_test.nc"; + int test_val = TEST_VAL_42; + int test_val_in; + + if ((ret = ncmpi_create(test_comm, test_file, NC_CLOBBER, MPI_INFO_NULL, &ncid))) + return ret; + if ((ret = ncmpi_def_var(ncid, VAR_NAME, NC_INT, 0, NULL, &varid))) + return ret; + if ((ret = ncmpi_enddef(ncid))) + return ret; + if ((ret = ncmpi_put_var_int_all(ncid, varid, &test_val))) + return ret; + if ((ret = ncmpi_close(ncid))) + return ret; + if ((ret = ncmpi_open(test_comm, test_file, NC_NOWRITE, MPI_INFO_NULL, &ncid))) + return ret; + /* Turn on independent access for pnetcdf file. */ + if ((ret = ncmpi_begin_indep_data(ncid))) + return ret; + /* if ((ret = ncmpi_get_var_int(ncid, varid, &test_val_in))) */ + /* return ret; */ + if ((ret = ncmpi_get_vars_int(ncid, varid, NULL, NULL, NULL, &test_val_in))) + return ret; + if ((ret = ncmpi_end_indep_data(ncid))) + return ret; + if (test_val_in != test_val) + return ERR_WRONG; + printf("ret = %d test_val_in = %d\n", ret, test_val_in); + if (test_val_in != test_val) + return ERR_WRONG; + if ((ret = ncmpi_close(ncid))) + return ret; + } +#endif /* _PNETCDF */ + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + char filename[PIO_MAX_NAME + 1]; /* Test filename. */ + char iotype_name[PIO_MAX_NAME + 1]; + + /* Create a filename. */ + if ((ret = get_iotype_name(flavor[fmt], iotype_name))) + return ret; + sprintf(filename, "%s_%s_scalar_async_%d.nc", TEST_NAME, iotype_name, async); + + /* Create the netCDF output file. */ + printf("%d Creating test file %s.\n", my_rank, filename); + if ((ret = PIOc_createfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_CLOBBER))) + ERR(ret); + + /* Define a scalar variable. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, 0, NULL, &varid))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Write a scalar value. */ + int test_val = TEST_VAL_42; + if ((ret = PIOc_put_var_int(ncid, varid, &test_val))) + ERR(ret); + + /* Check the scalar var. */ + if ((ret = check_scalar_var(ncid, varid, flavor[fmt]))) + ERR(ret); + + /* Close the netCDF file. */ + printf("%d Closing the sample data file...\n", my_rank); + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) + ERR(ret); + + /* Check the scalar var again. */ + if ((ret = check_scalar_var(ncid, varid, flavor[fmt]))) + ERR(ret); + + /* Close the netCDF file. 
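A condensed sketch (not part of the patch) of the PIO scalar round trip exercised above, assuming an initialized iosysid; the iotype, the file name "scalar_sketch.nc" and the variable name "answer" are placeholders used only for illustration:

    int ncid, varid, val_in, ret;
    int test_val = TEST_VAL_42;
    int iotype = PIO_IOTYPE_NETCDF;

    /* File and variable names below are placeholders, not from the tests. */
    if ((ret = PIOc_createfile(iosysid, &ncid, &iotype, "scalar_sketch.nc", PIO_CLOBBER)))
        ERR(ret);
    /* A scalar variable has 0 dimensions and no dimid array. */
    if ((ret = PIOc_def_var(ncid, "answer", PIO_INT, 0, NULL, &varid)))
        ERR(ret);
    if ((ret = PIOc_enddef(ncid)))
        ERR(ret);
    if ((ret = PIOc_put_var_int(ncid, varid, &test_val)))
        ERR(ret);
    if ((ret = PIOc_get_var_int(ncid, varid, &val_in)))
        ERR(ret);
    if (val_in != test_val)
        return ERR_WRONG;
    if ((ret = PIOc_closefile(ncid)))
        ERR(ret);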
*/ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + } + return PIO_NOERR; +} + /** Test the malloc_iodesc() function. * * @param my_rank rank of this task. @@ -1411,11 +1581,13 @@ int test_malloc_iodesc2(int iosysid, int my_rank) #else int num_types = NUM_CLASSIC_TYPES; #endif /* _NETCDF4 */ - int test_type[NUM_NETCDF_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, - PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64, PIO_STRING}; - int mpi_type[NUM_NETCDF_TYPES] = {MPI_BYTE, MPI_CHAR, MPI_SHORT, MPI_INT, MPI_FLOAT, MPI_DOUBLE, - MPI_UNSIGNED_CHAR, MPI_UNSIGNED_SHORT, MPI_UNSIGNED, MPI_LONG_LONG, - MPI_UNSIGNED_LONG_LONG, MPI_CHAR}; + int test_type[NUM_NETCDF_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, + PIO_FLOAT, PIO_DOUBLE, PIO_UBYTE, PIO_USHORT, + PIO_UINT, PIO_INT64, PIO_UINT64, PIO_STRING}; + MPI_Datatype mpi_type[NUM_NETCDF_TYPES] = {MPI_BYTE, MPI_CHAR, MPI_SHORT, MPI_INT, + MPI_FLOAT, MPI_DOUBLE, MPI_UNSIGNED_CHAR, + MPI_UNSIGNED_SHORT, MPI_UNSIGNED, MPI_LONG_LONG, + MPI_UNSIGNED_LONG_LONG, MPI_CHAR}; int ioid; iosystem_desc_t *ios; io_desc_t *iodesc; @@ -1423,10 +1595,11 @@ int test_malloc_iodesc2(int iosysid, int my_rank) if (!(ios = pio_get_iosystem_from_id(iosysid))) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); - + printf("test_malloc_iodesc2 num_types %d\n",num_types); /* Test with each type. */ for (int t = 0; t < num_types; t++) { + if ((ret = malloc_iodesc(ios, test_type[t], 1, &iodesc))) return ret; if (iodesc->basetype != mpi_type[t]) @@ -1439,7 +1612,6 @@ int test_malloc_iodesc2(int iosysid, int my_rank) if ((ret = pio_delete_iodesc_from_list(ioid))) return ret; } - return 0; } @@ -1450,7 +1622,7 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len int ioid; char filename[NC_MAX_NAME + 1]; /* Test decomp filename. */ char nc_filename[NC_MAX_NAME + 1]; /* Test decomp filename (netcdf version). */ - char too_long_name[PIO_MAX_NAME * 5 + 1]; + iosystem_desc_t *ios; /* IO system info. */ int ret; /* This will be our file name for writing out decompositions. */ @@ -1472,37 +1644,12 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len int task_maplen[TARGET_NTASKS] = {1, 1, 1, 1}; int map[TARGET_NTASKS][1] = {{0},{1},{2},{3}}; - /* These should not work. */ - memset(too_long_name, 74, PIO_MAX_NAME * 5); - too_long_name[PIO_MAX_NAME * 5] = 0; - if (pioc_write_nc_decomp_int(iosysid + TEST_VAL_42, nc_filename, 0, NDIM1, global_dimlen, - TARGET_NTASKS, task_maplen, (int *)map, title, - history, 0) != PIO_EBADID) - return ERR_WRONG; - if (pioc_write_nc_decomp_int(iosysid, NULL, 0, NDIM1, global_dimlen, - TARGET_NTASKS, task_maplen, (int *)map, title, - history, 0) != PIO_EINVAL) - return ERR_WRONG; - if (pioc_write_nc_decomp_int(iosysid, nc_filename, 0, NDIM1, NULL, - TARGET_NTASKS, task_maplen, (int *)map, title, - history, 0) != PIO_EINVAL) - return ERR_WRONG; - if (pioc_write_nc_decomp_int(iosysid, nc_filename, 0, NDIM1, global_dimlen, - TARGET_NTASKS, NULL, (int *)map, title, - history, 0) != PIO_EINVAL) - return ERR_WRONG; - if (pioc_write_nc_decomp_int(iosysid, nc_filename, 0, NDIM1, global_dimlen, - TARGET_NTASKS, task_maplen, (int *)map, too_long_name, - history, 0) != PIO_EINVAL) - return ERR_WRONG; - if (pioc_write_nc_decomp_int(iosysid, nc_filename, 0, NDIM1, global_dimlen, - TARGET_NTASKS, task_maplen, (int *)map, title, - too_long_name, 0) != PIO_EINVAL) - return ERR_WRONG; - + /* Get the IO system info. 
*/ + if (!(ios = pio_get_iosystem_from_id(iosysid))) + return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); /* Write the decomposition file. */ - if ((ret = pioc_write_nc_decomp_int(iosysid, nc_filename, 0, NDIM1, global_dimlen, + if ((ret = pioc_write_nc_decomp_int(ios, nc_filename, 0, NDIM1, global_dimlen, TARGET_NTASKS, task_maplen, (int *)map, title, history, 0))) return ret; @@ -1515,7 +1662,7 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len char source_in[PIO_MAX_NAME + 1]; char version_in[PIO_MAX_NAME + 1]; char expected_source[] = "Decomposition file produced by PIO library."; - int *global_dimlen_in; + int *global_dimlen_in; int *task_maplen_in; int *map_in; int fortran_order_in; @@ -1540,6 +1687,7 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len history_in, source_in, version_in, &fortran_order_in))) return ret; + /* Did we get the correct answers? */ printf("source_in = %s\n", source_in); if (strcmp(title, title_in) || strcmp(history, history_in) || @@ -1650,7 +1798,6 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len free(task_maplen_in); free(map_in); - /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) ERR(ret); @@ -1665,7 +1812,7 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, int ioid; char nc_filename[NC_MAX_NAME + 1]; /* Test decomp filename (netcdf version). */ int ret; - + /* This will be our file name for writing out decompositions. */ sprintf(nc_filename, "nc_decomp_%s_rank_%d_async_%d.nc", TEST_NAME, my_rank, async); @@ -1679,15 +1826,29 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, char *history = "Added to PIO automatic testing by Ed in February 2017."; /* These should not work. */ - if (PIOc_write_nc_decomp(iosysid + TEST_VAL_42, nc_filename, 0, ioid, test_comm, title, history, 0) != PIO_EBADID) + char too_long_name[PIO_MAX_NAME * 5 + 1]; + memset(too_long_name, 74, PIO_MAX_NAME * 5); + too_long_name[PIO_MAX_NAME * 5] = 0; + + if (PIOc_write_nc_decomp(iosysid + TEST_VAL_42, nc_filename, 0, ioid, + title, history, 0) != PIO_EBADID) return ERR_WRONG; - if (PIOc_write_nc_decomp(iosysid, NULL, 0, ioid, test_comm, title, history, 0) != PIO_EINVAL) + if (PIOc_write_nc_decomp(iosysid, NULL, 0, ioid, title, history, 0) != PIO_EINVAL) return ERR_WRONG; - if (PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid + TEST_VAL_42, test_comm, title, history, 0) != PIO_EBADID) + if (PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid + TEST_VAL_42, + title, history, 0) != PIO_EBADID) + return ERR_WRONG; + + if (PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, + too_long_name, history, 0) != PIO_EINVAL) + return ERR_WRONG; + if (PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, + title, too_long_name, 0) != PIO_EINVAL) return ERR_WRONG; /* Write a netCDF decomp file for this iosystem. */ - if ((ret = PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, test_comm, title, history, 0))) + if ((ret = PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, title, + history, 0))) return ret; int ioid_in; @@ -1696,8 +1857,8 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, int fortran_order_in; /* These should not work. 
*/ - if (PIOc_read_nc_decomp(iosysid + TEST_VAL_42, nc_filename, &ioid_in, test_comm, PIO_INT, - title_in, history_in, &fortran_order_in) != PIO_EBADID) + if (PIOc_read_nc_decomp(iosysid + TEST_VAL_42, nc_filename, &ioid_in, test_comm, + PIO_INT, title_in, history_in, &fortran_order_in) != PIO_EBADID) return ret; if (PIOc_read_nc_decomp(iosysid, NULL, &ioid_in, test_comm, PIO_INT, title_in, history_in, &fortran_order_in) != PIO_EINVAL) @@ -1705,7 +1866,7 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, if (PIOc_read_nc_decomp(iosysid, nc_filename, NULL, test_comm, PIO_INT, title_in, history_in, &fortran_order_in) != PIO_EINVAL) return ret; - + /* Read it using the public read function. */ if ((ret = PIOc_read_nc_decomp(iosysid, nc_filename, &ioid_in, test_comm, PIO_INT, title_in, history_in, &fortran_order_in))) @@ -1780,9 +1941,88 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, free(map_in); /* /\* These should also work. *\/ */ - /* if ((ret = PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, test_comm, title, history, 0))) */ + /* if ((ret = PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, title, history, 0))) */ /* return ret; */ + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + return 0; +} + +/* Test some decomp public API functions. */ +int test_decomp_public_2(int my_test_size, int my_rank, int iosysid, int dim_len, + MPI_Comm test_comm, int async) +{ + int ioid; + char nc_filename[NC_MAX_NAME + 1]; /* Test decomp filename (netcdf version). */ + int ret; + + /* This will be our file name for writing out decompositions. */ + sprintf(nc_filename, "nc_decomp_%s_rank_%d_async_%d.nc", TEST_NAME, my_rank, async); + + /* Decompose the data over the tasks. */ + if ((ret = create_decomposition(my_test_size, my_rank, iosysid, dim_len, &ioid))) + return ret; + + /* Write a netCDF decomp file for this iosystem. */ + if ((ret = PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, NULL, NULL, 0))) + return ret; + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + return 0; +} + +/* Test some decomp public API functions. */ +int test_decomp_2(int my_test_size, int my_rank, int iosysid, int dim_len, + MPI_Comm test_comm, int async) +{ + int ioid; + char nc_filename[NC_MAX_NAME + 1]; /* Test decomp filename (netcdf version). */ + int ret; + + /* This will be our file name for writing out decompositions. */ + sprintf(nc_filename, "nc_decomp_%s_rank_%d_async_%d.nc", TEST_NAME, my_rank, async); + + /* Decompose the data over the tasks. */ + if ((ret = create_decomposition(my_test_size, my_rank, iosysid, dim_len, &ioid))) + return ret; + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + return 0; +} + +/* Test some decomp public API functions with async. */ +int test_decomp_public_async(int my_test_size, int my_rank, int iosysid, MPI_Comm test_comm, + int async) +{ +#define ELEM1 1 +#define LEN3 3 + int ioid; + int dim_len = LEN3; + PIO_Offset elements_per_pe = ELEM1; + PIO_Offset compdof[ELEM1] = {my_rank + 1}; + char filename[PIO_MAX_NAME + 1]; + int ret; + + sprintf(filename, "async_decomp_%s_rank_%d_async_%d.nc", TEST_NAME, my_rank, async); + /* Create the PIO decomposition for this test. */ + if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM1, &dim_len, elements_per_pe, + compdof, &ioid, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + + /* Write the decomp file (on appropriate tasks). 
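A minimal sketch (not part of the patch) of the decomp-file round trip with the updated API: PIOc_write_nc_decomp() no longer takes an MPI communicator, while PIOc_read_nc_decomp() still does. It assumes an initialized iosysid, an existing decomposition ioid, a communicator comm such as the test_comm used throughout these tests, and a placeholder file name, title and history:

    int ioid_in, fortran_order_in, ret;
    char title_in[PIO_MAX_NAME + 1];
    char history_in[PIO_MAX_NAME + 1];

    /* Write the decomposition; note there is no MPI communicator argument.
     * File name, title and history below are placeholders. */
    if ((ret = PIOc_write_nc_decomp(iosysid, "decomp_sketch.nc", 0, ioid,
                                    "sketch title", "sketch history", 0)))
        return ret;

    /* Read it back into a new decomposition ID. */
    if ((ret = PIOc_read_nc_decomp(iosysid, "decomp_sketch.nc", &ioid_in, comm,
                                   PIO_INT, title_in, history_in, &fortran_order_in)))
        return ret;

    /* Free the decomposition created by the read. */
    if ((ret = PIOc_freedecomp(iosysid, ioid_in)))
        return ret;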
*/ + if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, NULL, NULL, 0))) + return ret; + /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) ERR(ret); @@ -1797,6 +2037,7 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te int ioid; int my_test_size; char filename[NC_MAX_NAME + 1]; + char nc_filename[NC_MAX_NAME + 1]; int ret; /* Return code. */ if ((ret = MPI_Comm_size(test_comm, &my_test_size))) @@ -1804,7 +2045,14 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te /* This will be our file name for writing out decompositions. */ sprintf(filename, "decomp_%d.txt", my_rank); + sprintf(nc_filename, "decomp_%d.nc", my_rank); + /* This is a simple test that just creates the decomp with + * async. */ + if (async) + if ((ret = test_decomp_public_async(my_test_size, my_rank, iosysid, test_comm, async))) + return ret; + /* Check iotypes. */ printf("%d Testing iotypes. async = %d\n", my_rank, async); if ((ret = test_iotypes(my_rank))) @@ -1828,25 +2076,25 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te if (!async) if ((ret = test_decomp_internal(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) return ret; - /* Test decomposition public API functions. */ if (!async) if ((ret = test_decomp_public(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) return ret; + /* This is a simple test that just creates a decomp. */ + /* if ((ret = test_decomp_2(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) */ + /* return ret; */ + + /* This is a simple test that just writes the decomp. */ if (!async) - { - printf("%d Testing darray. async = %d\n", my_rank, async); - - /* Decompose the data over the tasks. */ - if ((ret = create_decomposition(my_test_size, my_rank, iosysid, DIM_LEN, &ioid))) + if ((ret = test_decomp_public_2(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) return ret; - /* Write out an ASCII version of the decomp file. */ - printf("%d Calling write_decomp. async = %d\n", my_rank, async); - if ((ret = PIOc_write_decomp(filename, iosysid, ioid, test_comm))) + /* Decompose the data over the tasks. */ + if (!async) + { + if ((ret = create_decomposition(my_test_size, my_rank, iosysid, DIM_LEN, &ioid))) return ret; - printf("%d Called write_decomp. async = %d\n", my_rank, async); /* Run the darray tests. */ for (int fv = 0; fv < 2; fv++) @@ -1872,6 +2120,11 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te printf("%d Testing nc4 functions. async = %d\n", my_rank, async); if ((ret = test_nc4(iosysid, num_flavors, flavor, my_rank))) return ret; + + /* Test scalar var. */ + printf("%d Testing scalar var. async = %d\n", my_rank, async); + if ((ret = test_scalar(iosysid, num_flavors, flavor, my_rank, async, test_comm))) + return ret; return PIO_NOERR; } @@ -1880,6 +2133,6 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te int main(int argc, char **argv) { /* Change the 5th arg to 3 to turn on logging. 
*/ - return run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, 0, + return run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, 3, TEST_NAME, dim_len, COMPONENT_COUNT, NUM_IO_PROCS); } diff --git a/src/externals/pio2/tests/cunit/test_pioc_fill.c b/src/externals/pio2/tests/cunit/test_pioc_fill.c index e2015dee7f0f..c699a566be50 100644 --- a/src/externals/pio2/tests/cunit/test_pioc_fill.c +++ b/src/externals/pio2/tests/cunit/test_pioc_fill.c @@ -440,14 +440,18 @@ int create_putget_file(int iosysid, int flavor, int *dim_len, int *varid, const int xtype[NUM_NETCDF_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64, PIO_STRING}; int ncid; + int old_mode; int ret; /* Create the netCDF output file. */ if ((ret = PIOc_createfile(iosysid, &ncid, &flavor, filename, PIO_CLOBBER))) return ret; + /* This should not work. */ + if (PIOc_set_fill(ncid + TEST_VAL_42, NC_FILL, &old_mode) != PIO_EBADID) + return ret; + /* Turn on fill mode. */ - int old_mode; if ((ret = PIOc_set_fill(ncid, NC_FILL, &old_mode))) return ret; printf("old_mode = %d\n", old_mode); diff --git a/src/externals/pio2/tests/cunit/test_rearr.c b/src/externals/pio2/tests/cunit/test_rearr.c new file mode 100644 index 000000000000..89e278bfc140 --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_rearr.c @@ -0,0 +1,1542 @@ +/* + * This program tests some internal functions in the library related + * to the box and subset rearranger, and the transfer of data betweeen + * IO and computation tasks. + * + * Ed Hartnett, 3/9/17 + */ +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 1 + +/* The name of this test. */ +#define TEST_NAME "test_rearr" + +/* For 1-D use. */ +#define NDIM1 1 + +/* For maplens of 2. */ +#define MAPLEN2 2 + +/* Name of test var. (Name of a Welsh town.)*/ +#define VAR_NAME "Llanfairpwllgwyngyllgogerychwyrndrobwllllantysiliogogogoch" + +/* Test some of the rearranger utility functions. */ +int test_rearranger_opts1(int iosysid) +{ + iosystem_desc_t *ios; + int ret; + + /* This should not work. */ + if (PIOc_set_rearr_opts(TEST_VAL_42, 0, 0, false, false, 0, false, + false, 0) != PIO_EBADID) + return ERR_WRONG; + if (PIOc_set_rearr_opts(iosysid, TEST_VAL_42, 0, false, false, 0, false, + false, 0) != PIO_EINVAL) + return ERR_WRONG; + if (PIOc_set_rearr_opts(iosysid, 0, TEST_VAL_42, false, false, 0, false, + false, 0) != PIO_EINVAL) + return ERR_WRONG; + if (PIOc_set_rearr_opts(iosysid, 0, 0, false, false, + PIO_REARR_COMM_UNLIMITED_PEND_REQ - 1, false, + false, 0) != PIO_EINVAL) + return ERR_WRONG; + if (PIOc_set_rearr_opts(iosysid, 0, 0, false, false, 0, false, + false, PIO_REARR_COMM_UNLIMITED_PEND_REQ - 1) != + PIO_EINVAL) + return ERR_WRONG; + + /* This should work. */ + if ((ret = PIOc_set_rearr_opts(iosysid, PIO_REARR_COMM_P2P, + PIO_REARR_COMM_FC_1D_COMP2IO, true, + true, TEST_VAL_42, true, true, TEST_VAL_42 + 1))) + return ret; + + /* Get the IO system info from the id. */ + if (!(ios = pio_get_iosystem_from_id(iosysid))) + return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); + + /* Check the rearranger settings. 
*/ + if (ios->rearr_opts.comm_type != PIO_REARR_COMM_P2P || + ios->rearr_opts.fcd != PIO_REARR_COMM_FC_1D_COMP2IO || + !ios->rearr_opts.comp2io.hs || !ios->rearr_opts.comp2io.isend || + !ios->rearr_opts.io2comp.hs || !ios->rearr_opts.io2comp.isend || + ios->rearr_opts.comp2io.max_pend_req != TEST_VAL_42 || + ios->rearr_opts.io2comp.max_pend_req != TEST_VAL_42 + 1) + return ERR_WRONG; + + return 0; +} + +/* Test the compare_offsets() function. */ +int test_compare_offsets() +{ + mapsort m1, m2, m3; + + m1.rfrom = 0; + m1.soffset = 0; + m1.iomap = 0; + m2.rfrom = 0; + m2.soffset = 0; + m2.iomap = 0; + m3.rfrom = 0; + m3.soffset = 0; + m3.iomap = 1; + + /* Return 0 if either or both parameters are null. */ + if (compare_offsets(NULL, &m2)) + return ERR_WRONG; + if (compare_offsets(&m1, NULL)) + return ERR_WRONG; + if (compare_offsets(NULL, NULL)) + return ERR_WRONG; + + /* m1 and m2 are the same. */ + if (compare_offsets(&m1, &m2)) + return ERR_WRONG; + + /* m1 and m3 are different. */ + if (compare_offsets(&m1, &m3) != -1) + return ERR_WRONG; + return 0; +} + +/* Test the ceil2() and pair() functions. */ +int test_ceil2_pair() +{ + /* Test the ceil2() function. */ + if (ceil2(1) != 1) + return ERR_WRONG; + if (ceil2(-100) != 1) + return ERR_WRONG; + if (ceil2(2) != 2) + return ERR_WRONG; + if (ceil2(3) != 4) + return ERR_WRONG; + if (ceil2(16) != 16) + return ERR_WRONG; + if (ceil2(17) != 32) + return ERR_WRONG; + + /* Test the pair() function. */ + if (pair(4, 0, 0) != 1) + return ERR_WRONG; + if (pair(4, 2, 2) != 1) + return ERR_WRONG; + + return 0; +} + +/* Test the create_mpi_datatypes() function. + * @returns 0 for success, error code otherwise.*/ +int test_create_mpi_datatypes() +{ + MPI_Datatype basetype = MPI_INT; + int *mfrom = NULL; + int mpierr; + int ret; + + { + int msgcnt = 1; + PIO_Offset mindex[1] = {0}; + int mcount[1] = {1}; + MPI_Datatype mtype; + + /* Create an MPI data type. */ + if ((ret = create_mpi_datatypes(basetype, msgcnt, mindex, mcount, mfrom, &mtype))) + return ret; + + /* Free the type. */ + if ((mpierr = MPI_Type_free(&mtype))) + MPIERR(mpierr); + } + + { + int msgcnt = 4; + PIO_Offset mindex[4] = {0, 0, 0, 0}; + int mcount[4] = {1, 1, 1, 1}; + MPI_Datatype mtype2[4]; + + /* Create 4 MPI data types. */ + if ((ret = create_mpi_datatypes(basetype, msgcnt, mindex, mcount, mfrom, mtype2))) + return ret; + + /* Check the size of the data types. It should be 4. */ + MPI_Aint lb, extent; + for (int t = 0; t < 4; t++) + { + if ((mpierr = MPI_Type_get_extent(mtype2[t], &lb, &extent))) + MPIERR(mpierr); + printf("t = %d lb = %ld extent = %ld\n", t, lb, extent); + if (lb != 0 || extent != 4) + return ERR_WRONG; + } + + /* Free them. */ + for (int t = 0; t < 4; t++) + if ((mpierr = MPI_Type_free(&mtype2[t]))) + return ERR_WRONG; + } + + return 0; +} + +/* Test the idx_to_dim_list() function. */ +int test_idx_to_dim_list() +{ + int ndims = 1; + int gdims[1] = {1}; + PIO_Offset idx = 0; + PIO_Offset dim_list[1]; + + /* This simplest case. */ + idx_to_dim_list(ndims, gdims, idx, dim_list); + + if (dim_list[0] != 0) + return ERR_WRONG; + + /* The case given in the function docs. */ + int ndims2 = 2; + int gdims2[2] = {3, 2}; + PIO_Offset idx2 = 4; + PIO_Offset dim_list2[2]; + + /* According to function docs, we should get 2,0 */ + idx_to_dim_list(ndims2, gdims2, idx2, dim_list2); + printf("dim_list2[0] = %lld\n", dim_list2[0]); + printf("dim_list2[1] = %lld\n", dim_list2[1]); + + /* This is the correct result! 
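     *
     * Worked example (editorial aside, not part of the patch): treating idx as
     * a row-major linear index over gdims = {3, 2},
     *
     *     dim_list[1] = 4 % 2 = 0,  then idx = 4 / 2 = 2,
     *     dim_list[0] = 2 % 3 = 2,
     *
     * which yields {2, 0}, exactly what the check below expects.
     *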
*/ + if (dim_list2[0] != 2 || dim_list2[1] != 0) + return ERR_WRONG; + + return 0; +} + +/* Test the coord_to_lindex() function. */ +int test_coord_to_lindex() +{ + int ndims = 1; + PIO_Offset lcoord[1] = {0}; + PIO_Offset count[1] = {1}; + PIO_Offset lindex; + + /* Not sure what this function is really doing. */ + lindex = coord_to_lindex(ndims, lcoord, count); + if (lindex != 0) + return ERR_WRONG; + + int ndims2 = 2; + PIO_Offset lcoord2[2] = {0, 0}; + PIO_Offset count2[2] = {1, 1}; + PIO_Offset lindex2; + + lindex2 = coord_to_lindex(ndims2, lcoord2, count2); + if (lindex2 != 0) + return ERR_WRONG; + + int ndims3 = 2; + PIO_Offset lcoord3[2] = {1, 2}; + PIO_Offset count3[2] = {1, 1}; + PIO_Offset lindex3; + + lindex3 = coord_to_lindex(ndims3, lcoord3, count3); + printf("lindex = %lld\n", lindex3); + if (lindex3 != 3) + return ERR_WRONG; + + return 0; +} + +/* Test compute_maxIObuffersize() function. */ +int test_compute_maxIObuffersize(MPI_Comm test_comm, int my_rank) +{ + int ret; + + { + /* This is a simple test with one region containing 1 data + * element. */ + io_desc_t iodesc; + io_region *ior1; + int ndims = 1; + + /* This is how we allocate a region. */ + if ((ret = alloc_region2(NULL, ndims, &ior1))) + return ret; + ior1->next = NULL; + ior1->count[0] = 1; + + iodesc.firstregion = ior1; + iodesc.ndims = 1; + + /* Run the function. Simplest possible case. */ + if ((ret = compute_maxIObuffersize(test_comm, &iodesc))) + return ret; + if (iodesc.maxiobuflen != 1) + return ERR_WRONG; + + /* Free resources for the region. */ + free(ior1->start); + free(ior1->count); + free(ior1); + + } + + { + /* This also has a single region, but with 2 dims and count + * values > 1. */ + io_desc_t iodesc; + io_region *ior2; + int ndims = 2; + + /* This is how we allocate a region. */ + if ((ret = alloc_region2(NULL, ndims, &ior2))) + return ret; + + /* These should be 0. */ + for (int i = 0; i < ndims; i++) + if (ior2->start[i] != 0 || ior2->count[i] != 0) + return ERR_WRONG; + + ior2->next = NULL; + ior2->count[0] = 10; + ior2->count[1] = 2; + + iodesc.firstregion = ior2; + iodesc.ndims = 2; + + /* Run the function. */ + if ((ret = compute_maxIObuffersize(test_comm, &iodesc))) + return ret; + if (iodesc.maxiobuflen != 20) + return ERR_WRONG; + + /* Free resources for the region. */ + free(ior2->start); + free(ior2->count); + free(ior2); + } + + { + /* This test has two regions of different sizes. */ + io_desc_t iodesc; + io_region *ior3; + io_region *ior4; + int ndims = 2; + + /* This is how we allocate a region. */ + if ((ret = alloc_region2(NULL, ndims, &ior4))) + return ret; + ior4->next = NULL; + ior4->count[0] = 10; + ior4->count[1] = 2; + + if ((ret = alloc_region2(NULL, ndims, &ior3))) + return ret; + ior3->next = ior4; + ior3->count[0] = 100; + ior3->count[1] = 5; + + iodesc.firstregion = ior3; + iodesc.ndims = 2; + + /* Run the function. */ + if ((ret = compute_maxIObuffersize(test_comm, &iodesc))) + return ret; + printf("iodesc.maxiobuflen = %d\n", iodesc.maxiobuflen); + if (iodesc.maxiobuflen != 520) + return ERR_WRONG; + + /* Free resources for the region. */ + free(ior4->start); + free(ior4->count); + free(ior4); + free(ior3->start); + free(ior3->count); + free(ior3); + } + + return 0; +} + +/* Tests for determine_fill() function. */ +int test_determine_fill(MPI_Comm test_comm) +{ + iosystem_desc_t *ios; + io_desc_t *iodesc; + int gsize[1] = {4}; + PIO_Offset compmap[1] = {1}; + int ret; + + /* Initialize ios. 
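     *
     * (Editorial note: needsfill should end up set only when the decomposition
     * leaves part of the global array unwritten; the two determine_fill() calls
     * below toggle iodesc->llen between 1 and 0 to exercise both outcomes.)
     *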
*/ + if (!(ios = calloc(1, sizeof(iosystem_desc_t)))) + return PIO_ENOMEM; + ios->union_comm = test_comm; + + /* Set up iodesc for test. */ + if (!(iodesc = calloc(1, sizeof(io_desc_t)))) + return PIO_ENOMEM; + iodesc->ndims = 1; + iodesc->rearranger = PIO_REARR_SUBSET; + iodesc->llen = 1; + + /* We don't need fill. */ + if ((ret = determine_fill(ios, iodesc, gsize, compmap))) + return ret; + if (iodesc->needsfill) + return ERR_WRONG; + + /* Change settings, so now we do need fill. */ + iodesc->llen = 0; + if ((ret = determine_fill(ios, iodesc, gsize, compmap))) + return ret; + if (!iodesc->needsfill) + return ERR_WRONG; + + /* Free test resources. */ + free(ios); + free(iodesc); + + return 0; +} + +/* Run tests for get_start_and_count_regions() funciton. */ +int test_get_regions(int my_rank) +{ +#define MAPLEN 2 + int ndims = NDIM1; + const int gdimlen[NDIM1] = {8}; + /* Don't forget map is 1-based!! */ + PIO_Offset map[MAPLEN] = {(my_rank * 2) + 1, ((my_rank + 1) * 2) + 1}; + int maxregions; + io_region *ior1; + int ret; + + /* This is how we allocate a region. */ + if ((ret = alloc_region2(NULL, NDIM1, &ior1))) + return ret; + ior1->next = NULL; + ior1->count[0] = 1; + + /* Call the function we are testing. */ + if ((ret = get_regions(ndims, gdimlen, MAPLEN, map, &maxregions, ior1))) + return ret; + if (maxregions != 2) + return ERR_WRONG; + + /* Free resources for the region. */ + free(ior1->next->start); + free(ior1->next->count); + free(ior1->next); + free(ior1->start); + free(ior1->count); + free(ior1); + + return 0; +} + +/* Run tests for find_region() function. */ +int test_find_region() +{ + int ndims = NDIM1; + int gdimlen[NDIM1] = {4}; + int maplen = 1; + PIO_Offset map[1] = {1}; + PIO_Offset start[NDIM1]; + PIO_Offset count[NDIM1]; + PIO_Offset regionlen; + + /* Call the function we are testing. */ + regionlen = find_region(ndims, gdimlen, maplen, map, start, count); + + /* Check results. */ + printf("regionlen = %lld start[0] = %lld count[0] = %lld\n", regionlen, start[0], count[0]); + if (regionlen != 1 || start[0] != 0 || count[0] != 1) + return ERR_WRONG; + + return 0; +} + +/* Run tests for expand_region() function. */ +int test_expand_region() +{ + int dim = 0; + int gdims[NDIM1] = {1}; + int maplen = 1; + PIO_Offset map[1] = {5}; + int region_size = 1; + int region_stride = 1; + int max_size[NDIM1] = {10}; + PIO_Offset count[NDIM1]; + + expand_region(dim, gdims, maplen, map, region_size, region_stride, max_size, count); + if (count[0] != 1) + return ERR_WRONG; + printf("max_size[0] = %d count[0] = %lld\n", max_size[0], count[0]); + + return 0; +} + +/* Test define_iodesc_datatypes() function. */ +int test_define_iodesc_datatypes() +{ +#define NUM_REARRANGERS 2 + int rearranger[NUM_REARRANGERS] = {PIO_REARR_BOX, PIO_REARR_SUBSET}; + int mpierr; + int ret; + + /* Run the functon. */ + for (int r = 0; r < NUM_REARRANGERS; r++) + { + iosystem_desc_t ios; + io_desc_t iodesc; + + /* Set up test for IO task with BOX rearranger to create one type. */ + ios.ioproc = 1; /* this is IO proc. */ + ios.num_iotasks = 4; /* The number of IO tasks. */ + iodesc.rtype = NULL; /* Array of MPI types will be created here. */ + iodesc.nrecvs = 1; /* Number of types created. */ + iodesc.basetype = MPI_INT; + iodesc.stype = NULL; /* Array of MPI types will be created here. */ + + /* Allocate space for arrays in iodesc that will be filled in + * define_iodesc_datatypes(). 
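         *
         * (Editorial note: given the count/index arrays set up here,
         * define_iodesc_datatypes() is expected to build the MPI derived types
         * in iodesc.rtype and iodesc.stype, one receive type per nrecvs and,
         * for the BOX rearranger, one send type per IO task versus a single
         * send type for SUBSET, which is why the cleanup below frees exactly
         * that many types.)
         *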
*/ + if (!(iodesc.rcount = malloc(iodesc.nrecvs * sizeof(int)))) + return PIO_ENOMEM; + if (!(iodesc.rfrom = malloc(iodesc.nrecvs * sizeof(int)))) + return PIO_ENOMEM; + if (!(iodesc.rindex = malloc(1 * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + iodesc.rindex[0] = 0; + iodesc.rcount[0] = 1; + + iodesc.rearranger = rearranger[r]; + + /* The two rearrangers create a different number of send types. */ + int num_send_types = iodesc.rearranger == PIO_REARR_BOX ? ios.num_iotasks : 1; + + if (!(iodesc.sindex = malloc(num_send_types * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + if (!(iodesc.scount = malloc(num_send_types * sizeof(int)))) + return PIO_ENOMEM; + for (int st = 0; st < num_send_types; st++) + { + iodesc.sindex[st] = 0; + iodesc.scount[st] = 1; + } + + /* Run the test function. */ + if ((ret = define_iodesc_datatypes(&ios, &iodesc))) + return ret; + + /* We created send types, so free them. */ + for (int st = 0; st < num_send_types; st++) + if ((mpierr = MPI_Type_free(&iodesc.stype[st]))) + MPIERR(mpierr); + + /* We created one receive type, so free it. */ + if ((mpierr = MPI_Type_free(&iodesc.rtype[0]))) + MPIERR(mpierr); + + /* Free resources. */ + free(iodesc.rtype); + free(iodesc.sindex); + free(iodesc.scount); + free(iodesc.stype); + free(iodesc.rcount); + free(iodesc.rfrom); + free(iodesc.rindex); + } + + return 0; +} + +/* Test the compute_counts() function with the box rearranger. */ +int test_compute_counts(MPI_Comm test_comm, int my_rank) +{ + iosystem_desc_t *ios; + io_desc_t *iodesc; + int dest_ioproc[TARGET_NTASKS] = {0, 1, 2, 3}; + PIO_Offset dest_ioindex[TARGET_NTASKS] = {0, 1, 2, 3}; + int ret; + + /* Initialize ios. */ + if (!(ios = calloc(1, sizeof(iosystem_desc_t)))) + return PIO_ENOMEM; + + ios->num_iotasks = TARGET_NTASKS; + ios->num_comptasks = TARGET_NTASKS; + ios->num_uniontasks = TARGET_NTASKS; + ios->ioproc = 1; + ios->compproc = 1; + ios->union_comm = test_comm; + if (!(ios->ioranks = malloc(TARGET_NTASKS * sizeof(int)))) + return PIO_ENOMEM; + for (int t = 0; t < TARGET_NTASKS; t++) + ios->ioranks[t] = t; + if (!(ios->compranks = calloc(ios->num_comptasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->compranks[i] = i; + + /* Initialize iodesc. */ + if (!(iodesc = calloc(1, sizeof(io_desc_t)))) + return PIO_ENOMEM; + iodesc->rearranger = PIO_REARR_BOX; + iodesc->ndof = TARGET_NTASKS; + iodesc->llen = TARGET_NTASKS; + iodesc->rearr_opts.comm_type = PIO_REARR_COMM_COLL; + iodesc->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; + + /* Test the function. */ + if ((ret = compute_counts(ios, iodesc, dest_ioproc, dest_ioindex))) + return ret; + + /* Check results. */ + for (int i = 0; i < ios->num_iotasks; i++) + if (iodesc->scount[i] != 1 || iodesc->sindex[i] != i) + return ERR_WRONG; + + for (int i = 0; i < iodesc->ndof; i++) + if (iodesc->rcount[i] != 1 || iodesc->rfrom[i] != i || + iodesc->rindex[i] != my_rank) + return ERR_WRONG; + + /* Free resources allocated in compute_counts(). */ + free(iodesc->scount); + free(iodesc->sindex); + free(iodesc->rcount); + free(iodesc->rfrom); + free(iodesc->rindex); + + /* Free test resources. */ + free(ios->ioranks); + free(ios->compranks); + free(iodesc); + free(ios); + + return 0; +} + +/* Call PIOc_InitDecomp() with parameters such that it calls + * box_rearrange_create() just like test_box_rearrange_create() will + * (see below). 
*/ +int test_init_decomp(int iosysid, MPI_Comm test_comm, int my_rank) +{ + int ioid; + PIO_Offset compmap[MAPLEN2] = {my_rank * 2, (my_rank + 1) * 2}; + const int gdimlen[NDIM1] = {8}; + int ret; + + /* Initialize a decomposition. */ + if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM1, gdimlen, MAPLEN2, + compmap, &ioid, PIO_REARR_BOX, NULL, NULL))) + return ret; + + /* Free it. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + return ret; + + return 0; +} + +/* Test for the box_rearrange_create() function. */ +int test_box_rearrange_create(MPI_Comm test_comm, int my_rank) +{ + iosystem_desc_t *ios; + io_desc_t *iodesc; + io_region *ior1; + int maplen = MAPLEN2; + PIO_Offset compmap[MAPLEN2] = {(my_rank * 2) + 1, ((my_rank + 1) * 2) + 1}; + const int gdimlen[NDIM1] = {8}; + int ndims = NDIM1; + int ret; + + /* Allocate IO system info struct for this test. */ + if (!(ios = calloc(1, sizeof(iosystem_desc_t)))) + return PIO_ENOMEM; + + /* Allocate IO desc struct for this test. */ + if (!(iodesc = calloc(1, sizeof(io_desc_t)))) + return PIO_ENOMEM; + + /* Default rearranger options. */ + iodesc->rearr_opts.comm_type = PIO_REARR_COMM_COLL; + iodesc->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; + + /* Set up for determine_fill(). */ + ios->union_comm = test_comm; + ios->io_comm = test_comm; + iodesc->ndims = NDIM1; + iodesc->rearranger = PIO_REARR_BOX; + + /* Set up the IO task info for the test. */ + ios->ioproc = 1; + ios->compproc = 1; + ios->union_rank = my_rank; + ios->num_iotasks = 4; + ios->num_comptasks = 4; + ios->num_uniontasks = 4; + if (!(ios->ioranks = calloc(ios->num_iotasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->ioranks[i] = i; + if (!(ios->compranks = calloc(ios->num_comptasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->compranks[i] = i; + + /* This is how we allocate a region. */ + if ((ret = alloc_region2(NULL, NDIM1, &ior1))) + return ret; + if (my_rank == 0) + ior1->count[0] = 8; + + iodesc->firstregion = ior1; + + /* We are finally ready to run the code under test. */ + if ((ret = box_rearrange_create(ios, maplen, compmap, gdimlen, ndims, iodesc))) + return ret; + + /* Check some results. */ + if (iodesc->rearranger != PIO_REARR_BOX || iodesc->ndof != maplen || + iodesc->llen != my_rank ? 0 : 8 || !iodesc->needsfill) + return ERR_WRONG; + + /* for (int i = 0; i < ios->num_iotasks; i++) */ + /* { */ + /* /\* sindex is only allocated if scount[i] > 0. *\/ */ + /* if (iodesc->scount[i] != i ? 0 : 1 || */ + /* (iodesc->scount[i] && iodesc->sindex[i] != 0)) */ + /* return ERR_WRONG; */ + /* } */ + + /* for (int i = 0; i < iodesc->ndof; i++) */ + /* { */ + /* /\* rcount is 1 for rank 0, 0 on other tasks. *\/ */ + /* if (iodesc->rcount[i] != my_rank ? 0 : 1) */ + /* return ERR_WRONG; */ + + /* /\* rfrom is 0 everywhere, except task 0, array elemnt 1. *\/ */ + /* if (my_rank == 0 && i == 1) */ + /* { */ + /* if (iodesc->rfrom[i] != 1) */ + /* return ERR_WRONG; */ + /* } */ + /* else */ + /* { */ + /* if (iodesc->rfrom[i] != 0) */ + /* return ERR_WRONG; */ + /* } */ + + /* /\* rindex is only allocated where there is a non-zero count. *\/ */ + /* if (iodesc->rcount[i]) */ + /* if (iodesc->rindex[i] != 0) */ + /* return ERR_WRONG; */ + /* } */ + + /* Free resources allocated in compute_counts(). 
*/ + free(iodesc->scount); + free(iodesc->sindex); + free(iodesc->rcount); + free(iodesc->rfrom); + free(iodesc->rindex); + + /* Free resources from test. */ + free(ior1->start); + free(ior1->count); + free(ior1); + free(ios->ioranks); + free(ios->compranks); + free(iodesc); + free(ios); + + return 0; +} + +/* Test for the box_rearrange_create() function. */ +int test_box_rearrange_create_2(MPI_Comm test_comm, int my_rank) +{ +#define MAPLEN2 2 + iosystem_desc_t *ios; + io_desc_t *iodesc; + io_region *ior1; + int maplen = MAPLEN2; + PIO_Offset compmap[MAPLEN2] = {1, 0}; + const int gdimlen[NDIM1] = {8}; + int ndims = NDIM1; + int ret; + + /* Allocate IO system info struct for this test. */ + if (!(ios = calloc(1, sizeof(iosystem_desc_t)))) + return PIO_ENOMEM; + + /* Allocate IO desc struct for this test. */ + if (!(iodesc = calloc(1, sizeof(io_desc_t)))) + return PIO_ENOMEM; + + /* Default rearranger options. */ + iodesc->rearr_opts.comm_type = PIO_REARR_COMM_COLL; + iodesc->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; + + /* Set up for determine_fill(). */ + ios->union_comm = test_comm; + ios->io_comm = test_comm; + iodesc->ndims = NDIM1; + iodesc->rearranger = PIO_REARR_BOX; + + /* This is the size of the map in computation tasks. */ + iodesc->ndof = 2; + + /* Set up the IO task info for the test. */ + ios->ioproc = 1; + ios->compproc = 1; + ios->union_rank = my_rank; + ios->num_iotasks = 4; + ios->num_comptasks = 4; + ios->num_uniontasks = 4; + if (!(ios->ioranks = calloc(ios->num_iotasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->ioranks[i] = i; + if (!(ios->compranks = calloc(ios->num_comptasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->compranks[i] = i; + + /* This is how we allocate a region. */ + if ((ret = alloc_region2(NULL, NDIM1, &ior1))) + return ret; + ior1->next = NULL; + if (my_rank == 0) + ior1->count[0] = 8; + + iodesc->firstregion = ior1; + + /* We are finally ready to run the code under test. */ + if ((ret = box_rearrange_create(ios, maplen, compmap, gdimlen, ndims, iodesc))) + return ret; + + /* Check some results. */ + if (iodesc->rearranger != PIO_REARR_BOX || iodesc->ndof != maplen || + iodesc->llen != my_rank ? 0 : 8 || !iodesc->needsfill) + return ERR_WRONG; + + for (int i = 0; i < ios->num_iotasks; i++) + { + /* sindex is only allocated if scount[i] > 0. */ + if (iodesc->scount[i] != i ? 0 : 1 || + (iodesc->scount[i] && iodesc->sindex[i] != 0)) + return ERR_WRONG; + } + + if (my_rank == 0) + { + for (int i = 0; i < iodesc->ndof; i++) + { + /* rcount is 1 for rank 0, 0 on other tasks. */ + if (iodesc->rcount[i] != 1) + return ERR_WRONG; + + /* rfrom only matters if there is a non-zero count. */ + if (iodesc->rcount[i]) + if (iodesc->rfrom[i] != i ? 1 : 0) + return ERR_WRONG; + + /* rindex is only allocated where there is a non-zero count. */ + if (iodesc->rcount[i]) + if (iodesc->rindex[i] != 0) + return ERR_WRONG; + } + } + + /* Free resources allocated in compute_counts(). */ + free(iodesc->scount); + free(iodesc->sindex); + free(iodesc->rcount); + free(iodesc->rfrom); + free(iodesc->rindex); + + /* Free resources from test. */ + free(ior1->start); + free(ior1->count); + free(ior1); + free(ios->ioranks); + free(ios->compranks); + free(iodesc); + free(ios); + + return 0; +} + +/* Test function default_subset_partition. 
*/ +int test_default_subset_partition(MPI_Comm test_comm, int my_rank) +{ + iosystem_desc_t *ios; + io_desc_t *iodesc; + int mpierr; + int ret; + + /* Allocate IO system info struct for this test. */ + if (!(ios = calloc(1, sizeof(iosystem_desc_t)))) + return PIO_ENOMEM; + + /* Allocate IO desc struct for this test. */ + if (!(iodesc = calloc(1, sizeof(io_desc_t)))) + return PIO_ENOMEM; + + ios->ioproc = 1; + ios->io_rank = my_rank; + ios->comp_comm = test_comm; + + /* Run the function to test. */ + if ((ret = default_subset_partition(ios, iodesc))) + return ret; + + /* Free the created communicator. */ + if ((mpierr = MPI_Comm_free(&iodesc->subset_comm))) + MPIERR(mpierr); + + /* Free resources from test. */ + free(iodesc); + free(ios); + + return 0; +} + +/* Test function rearrange_comp2io. */ +int test_rearrange_comp2io(MPI_Comm test_comm, int my_rank) +{ + iosystem_desc_t *ios; + io_desc_t *iodesc; + void *sbuf = NULL; + void *rbuf = NULL; + int nvars = 1; + io_region *ior1; + int maplen = 2; + PIO_Offset compmap[2] = {1, 0}; + const int gdimlen[NDIM1] = {8}; + int ndims = NDIM1; + int mpierr; + int ret; + + /* Allocate some space for data. */ + if (!(sbuf = calloc(4, sizeof(int)))) + return PIO_ENOMEM; + if (!(rbuf = calloc(4, sizeof(int)))) + return PIO_ENOMEM; + + /* Allocate IO system info struct for this test. */ + if (!(ios = calloc(1, sizeof(iosystem_desc_t)))) + return PIO_ENOMEM; + + /* Allocate IO desc struct for this test. */ + if (!(iodesc = calloc(1, sizeof(io_desc_t)))) + return PIO_ENOMEM; + + ios->ioproc = 1; + ios->compproc = 1; + ios->io_rank = my_rank; + ios->union_comm = test_comm; + ios->num_iotasks = TARGET_NTASKS; + ios->num_uniontasks = TARGET_NTASKS; + iodesc->rearranger = PIO_REARR_BOX; + iodesc->basetype = MPI_INT; + + /* Set up test for IO task with BOX rearranger to create one type. */ + iodesc->rtype = NULL; /* Array of MPI types will be created here. */ + iodesc->nrecvs = 1; /* Number of types created. */ + iodesc->basetype = MPI_INT; + iodesc->stype = NULL; /* Array of MPI types will be created here. */ + + /* The two rearrangers create a different number of send types. */ + int num_send_types = iodesc->rearranger == PIO_REARR_BOX ? ios->num_iotasks : 1; + + /* Default rearranger options. */ + iodesc->rearr_opts.comm_type = PIO_REARR_COMM_COLL; + iodesc->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; + + /* Set up for determine_fill(). */ + ios->union_comm = test_comm; + ios->io_comm = test_comm; + iodesc->ndims = NDIM1; + iodesc->rearranger = PIO_REARR_BOX; + + iodesc->ndof = 4; + + /* Set up the IO task info for the test. */ + ios->union_rank = my_rank; + ios->num_comptasks = 4; + if (!(ios->ioranks = calloc(ios->num_iotasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->ioranks[i] = i; + if (!(ios->compranks = calloc(ios->num_comptasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->compranks[i] = i; + + /* This is how we allocate a region. */ + if ((ret = alloc_region2(NULL, NDIM1, &ior1))) + return ret; + ior1->next = NULL; + if (my_rank == 0) + ior1->count[0] = 8; + + iodesc->firstregion = ior1; + + /* Create the box rearranger. */ + if ((ret = box_rearrange_create(ios, maplen, compmap, gdimlen, ndims, iodesc))) + return ret; + + /* Run the function to test. 
*/ + if ((ret = rearrange_comp2io(ios, iodesc, sbuf, rbuf, nvars))) + return ret; + printf("returned from rearrange_comp2io\n"); + + /* We created send types, so free them. */ + for (int st = 0; st < num_send_types; st++) + if (iodesc->stype[st] != PIO_DATATYPE_NULL) + if ((mpierr = MPI_Type_free(&iodesc->stype[st]))) + MPIERR(mpierr); + + /* We created one receive type, so free it. */ + if (iodesc->rtype) + for (int r = 0; r < iodesc->nrecvs; r++) + if (iodesc->rtype[r] != PIO_DATATYPE_NULL) + if ((mpierr = MPI_Type_free(&iodesc->rtype[r]))) + MPIERR(mpierr); + + /* Free resources allocated in library code. */ + free(iodesc->rtype); + free(iodesc->sindex); + free(iodesc->scount); + free(iodesc->stype); + free(iodesc->rcount); + free(iodesc->rfrom); + free(iodesc->rindex); + + /* Free resources from test. */ + free(ior1->start); + free(ior1->count); + free(ior1); + free(ios->ioranks); + free(ios->compranks); + free(iodesc); + free(ios); + free(sbuf); + free(rbuf); + + return 0; +} + +/* Test function rearrange_io2comp. */ +int test_rearrange_io2comp(MPI_Comm test_comm, int my_rank) +{ + iosystem_desc_t *ios; + io_desc_t *iodesc; + void *sbuf = NULL; + void *rbuf = NULL; + io_region *ior1; + int maplen = 2; + PIO_Offset compmap[2] = {1, 0}; + const int gdimlen[NDIM1] = {8}; + int ndims = NDIM1; + int mpierr; + int ret; + + /* Allocate some space for data. */ + if (!(sbuf = calloc(4, sizeof(int)))) + return PIO_ENOMEM; + if (!(rbuf = calloc(4, sizeof(int)))) + return PIO_ENOMEM; + + /* Allocate IO system info struct for this test. */ + if (!(ios = calloc(1, sizeof(iosystem_desc_t)))) + return PIO_ENOMEM; + + /* Allocate IO desc struct for this test. */ + if (!(iodesc = calloc(1, sizeof(io_desc_t)))) + return PIO_ENOMEM; + + ios->ioproc = 1; + ios->io_rank = my_rank; + ios->union_comm = test_comm; + ios->num_iotasks = TARGET_NTASKS; + iodesc->rearranger = PIO_REARR_BOX; + iodesc->basetype = MPI_INT; + + /* Set up test for IO task with BOX rearranger to create one type. */ + ios->ioproc = 1; /* this is IO proc. */ + ios->num_iotasks = 4; /* The number of IO tasks. */ + iodesc->rtype = NULL; /* Array of MPI types will be created here. */ + iodesc->nrecvs = 1; /* Number of types created. */ + iodesc->basetype = MPI_INT; + iodesc->stype = NULL; /* Array of MPI types will be created here. */ + + /* The two rearrangers create a different number of send types. */ + int num_send_types = iodesc->rearranger == PIO_REARR_BOX ? ios->num_iotasks : 1; + + /* Default rearranger options. */ + iodesc->rearr_opts.comm_type = PIO_REARR_COMM_COLL; + iodesc->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; + + /* Set up for determine_fill(). */ + ios->union_comm = test_comm; + ios->io_comm = test_comm; + iodesc->ndims = NDIM1; + iodesc->rearranger = PIO_REARR_BOX; + + iodesc->ndof = 4; + + /* Set up the IO task info for the test. */ + ios->ioproc = 1; + ios->compproc = 1; + ios->union_rank = my_rank; + ios->num_iotasks = 4; + ios->num_comptasks = 4; + ios->num_uniontasks = 4; + if (!(ios->ioranks = calloc(ios->num_iotasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->ioranks[i] = i; + if (!(ios->compranks = calloc(ios->num_comptasks, sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + for (int i = 0; i < TARGET_NTASKS; i++) + ios->compranks[i] = i; + + /* This is how we allocate a region. 
*/ + if ((ret = alloc_region2(NULL, NDIM1, &ior1))) + return ret; + ior1->next = NULL; + if (my_rank == 0) + ior1->count[0] = 8; + + iodesc->firstregion = ior1; + + /* Create the box rearranger. */ + if ((ret = box_rearrange_create(ios, maplen, compmap, gdimlen, ndims, iodesc))) + return ret; + + /* Run the function to test. */ + if ((ret = rearrange_io2comp(ios, iodesc, sbuf, rbuf))) + return ret; + printf("returned from rearrange_comp2io\n"); + + /* We created send types, so free them. */ + for (int st = 0; st < num_send_types; st++) + if (iodesc->stype[st] != PIO_DATATYPE_NULL) + if ((mpierr = MPI_Type_free(&iodesc->stype[st]))) + MPIERR(mpierr); + + /* We created one receive type, so free it. */ + if (iodesc->rtype) + for (int r = 0; r < iodesc->nrecvs; r++) + if (iodesc->rtype[r] != PIO_DATATYPE_NULL) + if ((mpierr = MPI_Type_free(&iodesc->rtype[r]))) + MPIERR(mpierr); + + /* Free resources allocated in library code. */ + free(iodesc->rtype); + free(iodesc->sindex); + free(iodesc->scount); + free(iodesc->stype); + free(iodesc->rcount); + free(iodesc->rfrom); + free(iodesc->rindex); + + /* Free resources from test. */ + free(ior1->start); + free(ior1->count); + free(ior1); + free(ios->ioranks); + free(ios->compranks); + free(iodesc); + free(ios); + free(sbuf); + free(rbuf); + + return 0; +} + +/* These tests do not need an iosysid. */ +int run_no_iosys_tests(int my_rank, MPI_Comm test_comm) +{ + int ret; + + printf("%d running idx_to_dim_list tests\n", my_rank); + if ((ret = test_idx_to_dim_list())) + return ret; + + printf("%d running coord_to_lindex tests\n", my_rank); + if ((ret = test_coord_to_lindex())) + return ret; + + printf("%d running compute_maxIObuffersize tests\n", my_rank); + if ((ret = test_compute_maxIObuffersize(test_comm, my_rank))) + return ret; + + printf("%d running determine_fill\n", my_rank); + if ((ret = test_determine_fill(test_comm))) + return ret; + + printf("%d running tests for expand_region()\n", my_rank); + if ((ret = test_expand_region())) + return ret; + + printf("%d running tests for find_region()\n", my_rank); + if ((ret = test_find_region())) + return ret; + + printf("%d running tests for get_regions()\n", my_rank); + if ((ret = test_get_regions(my_rank))) + return ret; + + printf("%d running create_mpi_datatypes tests\n", my_rank); + if ((ret = test_create_mpi_datatypes())) + return ret; + + printf("%d running define_iodesc_datatypes tests\n", my_rank); + if ((ret = test_define_iodesc_datatypes())) + return ret; + + printf("%d running compare_offsets tests\n", my_rank); + if ((ret = test_compare_offsets())) + return ret; + + printf("%d running compute_counts tests for box rearranger\n", my_rank); + if ((ret = test_compute_counts(test_comm, my_rank))) + return ret; + + printf("%d running tests for box_rearrange_create\n", my_rank); + if ((ret = test_box_rearrange_create(test_comm, my_rank))) + return ret; + + printf("%d running more tests for box_rearrange_create\n", my_rank); + if ((ret = test_box_rearrange_create_2(test_comm, my_rank))) + return ret; + + printf("%d running tests for default_subset_partition\n", my_rank); + if ((ret = test_default_subset_partition(test_comm, my_rank))) + return ret; + + printf("%d running tests for rearrange_comp2io\n", my_rank); + if ((ret = test_rearrange_comp2io(test_comm, my_rank))) + return ret; + + printf("%d running tests for rearrange_io2comp\n", my_rank); + if ((ret = test_rearrange_io2comp(test_comm, my_rank))) + return ret; + + return 0; +} + +/* Test scalar vars. 
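 *
 * The round trip exercised below, reduced to a minimal sketch (editorial
 * aside using the same PIO C calls as the function body; error checking
 * omitted, and iotype/fname stand in for a flavor and file name chosen by
 * the caller):
 *
 *     int ncid, varid, val = 42, val_in;
 *     PIOc_createfile(iosysid, &ncid, &iotype, fname, NC_CLOBBER);
 *     PIOc_def_var(ncid, "v", PIO_INT, 0, NULL, &varid);    (0 dims => scalar)
 *     PIOc_enddef(ncid);
 *     PIOc_put_var_int(ncid, varid, &val);
 *     PIOc_closefile(ncid);
 *     PIOc_openfile(iosysid, &ncid, &iotype, fname, NC_NOWRITE);
 *     PIOc_get_var_int(ncid, varid, &val_in);               (val_in == val)
 *     PIOc_closefile(ncid);
 *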
*/ +int test_scalar(int numio, int iosysid, MPI_Comm test_comm, int my_rank, + int num_flavors, int *flavor) +{ + + int var_type[NUM_NETCDF4_TYPES - 1] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, + PIO_DOUBLE, PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, + PIO_UINT64}; + char char_data = 2; + signed char byte_data = -42; + short short_data = -300; + int int_data = -10000; + float float_data = -42.42; + double double_data = -420000000000.5; + unsigned char ubyte_data = 43; + unsigned short ushort_data = 666; + unsigned int uint_data = 666666; + long long int64_data = -99999999999; + unsigned long long uint64_data = 99999999999; + char char_data_in; + signed char byte_data_in; + short short_data_in; + unsigned char ubyte_data_in; + int int_data_in; + float float_data_in; + double double_data_in; + unsigned short ushort_data_in; + unsigned int uint_data_in; + long long int64_data_in; + unsigned long long uint64_data_in; + + int ret; + + /* Run tests with all available iotypes. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + /* For netcdf-4, there are extra types. */ + int num_types = (flavor[fmt] == PIO_IOTYPE_NETCDF4C || flavor[fmt] == PIO_IOTYPE_NETCDF4P) ? + NUM_NETCDF4_TYPES - 1 : NUM_CLASSIC_TYPES; + + /* For each available type, create a file with a scalar var of + * that type. */ + for (int t = 0; t < num_types; t++) + { + int ncid; + int varid; + char filename[PIO_MAX_NAME + 1]; + + printf("test with t = %d\n", t); + + /* These iotypes only handle netCDF classic types. */ + if (t >= NUM_CLASSIC_TYPES && + (flavor[fmt] == PIO_IOTYPE_PNETCDF || flavor[fmt] == PIO_IOTYPE_NETCDF)) + continue; + + /* Create filename. */ + sprintf(filename, "%s_scalar_numio_%d_iotype_%d_var_type_%d.nc", TEST_NAME, + numio, flavor[fmt], var_type[t]); + + /* Create the file. */ + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, NC_CLOBBER))) + return ret; + + /* Define scalar var. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, var_type[t], 0, NULL, &varid))) + return ret; + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + return ret; + + /* Write a value. */ + switch (var_type[t]) + { + case PIO_BYTE: + if ((ret = PIOc_put_var_schar(ncid, varid, &byte_data))) + return ret; + break; + case PIO_CHAR: + if ((ret = PIOc_put_var_text(ncid, varid, &char_data))) + return ret; + break; + case PIO_SHORT: + if ((ret = PIOc_put_var_short(ncid, varid, &short_data))) + return ret; + break; + case PIO_INT: + if ((ret = PIOc_put_var_int(ncid, varid, &int_data))) + return ret; + break; + case PIO_FLOAT: + if ((ret = PIOc_put_var_float(ncid, varid, &float_data))) + return ret; + break; + case PIO_DOUBLE: + if ((ret = PIOc_put_var_double(ncid, varid, &double_data))) + return ret; + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if ((ret = PIOc_put_var_uchar(ncid, varid, &ubyte_data))) + return ret; + break; + case PIO_USHORT: + if ((ret = PIOc_put_var_ushort(ncid, varid, &ushort_data))) + return ret; + break; + case PIO_UINT: + if ((ret = PIOc_put_var_uint(ncid, varid, &uint_data))) + return ret; + break; + case PIO_INT64: + if ((ret = PIOc_put_var_longlong(ncid, varid, &int64_data))) + return ret; + break; + case PIO_UINT64: + if ((ret = PIOc_put_var_ulonglong(ncid, varid, &uint64_data))) + return ret; + break; +#endif /* _NETCDF4 */ + default: + return ERR_WRONG; + } + + /* Close the file. */ + if ((ret = PIOc_closefile(ncid))) + return ret; + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid, &flavor[fmt], filename, NC_NOWRITE))) + return ret; + + /* Read the value. 
Is the value correct? */ + switch (var_type[t]) + { + case PIO_BYTE: + if ((ret = PIOc_get_var_schar(ncid, varid, &byte_data_in))) + return ret; + if (byte_data_in != byte_data) + return ERR_WRONG; + break; + case PIO_CHAR: + if ((ret = PIOc_get_var_text(ncid, varid, &char_data_in))) + return ret; + if (char_data_in != char_data) + return ERR_WRONG; + break; + case PIO_SHORT: + if ((ret = PIOc_get_var_short(ncid, varid, &short_data_in))) + return ret; + if (short_data_in != short_data) + return ERR_WRONG; + break; + case PIO_INT: + if ((ret = PIOc_get_var_int(ncid, varid, &int_data_in))) + return ret; + if (int_data_in != int_data) + return ERR_WRONG; + break; + case PIO_FLOAT: + if ((ret = PIOc_get_var_float(ncid, varid, &float_data_in))) + return ret; + if (float_data_in != float_data) + return ERR_WRONG; + break; + case PIO_DOUBLE: + if ((ret = PIOc_get_var_double(ncid, varid, &double_data_in))) + return ret; + if (double_data_in != double_data) + return ERR_WRONG; + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if ((ret = PIOc_get_var_uchar(ncid, varid, &ubyte_data_in))) + return ret; + if (ubyte_data_in != ubyte_data) + return ERR_WRONG; + break; + case PIO_USHORT: + if ((ret = PIOc_get_var_ushort(ncid, varid, &ushort_data_in))) + return ret; + if (ushort_data_in != ushort_data) + return ERR_WRONG; + break; + case PIO_UINT: + if ((ret = PIOc_get_var_uint(ncid, varid, &uint_data_in))) + return ret; + if (uint_data_in != uint_data) + return ERR_WRONG; + break; + case PIO_INT64: + if ((ret = PIOc_get_var_longlong(ncid, varid, &int64_data_in))) + return ret; + if (int64_data_in != int64_data) + return ERR_WRONG; + break; + case PIO_UINT64: + if ((ret = PIOc_get_var_ulonglong(ncid, varid, &uint64_data_in))) + return ret; + if (uint64_data_in != uint64_data) + return ERR_WRONG; + break; +#endif /* _NETCDF4 */ + default: + return ERR_WRONG; + } + + /* Close the file. */ + if ((ret = PIOc_closefile(ncid))) + return ret; + + } /* next iotype */ + } /* next type */ + + return 0; +} + +/* These tests are run with different rearrangers and numbers of IO + * tasks. */ +int run_iosys_tests(int numio, int iosysid, int my_rank, MPI_Comm test_comm, + int num_flavors, int *flavor) +{ + int ret; + + printf("%d running rearranger opts tests 1\n", my_rank); + if ((ret = test_rearranger_opts1(iosysid))) + return ret; + + printf("%d running test for init_decomp\n", my_rank); + if ((ret = test_init_decomp(iosysid, test_comm, my_rank))) + return ret; + + printf("%d running test for init_decomp\n", my_rank); + if ((ret = test_scalar(numio, iosysid, test_comm, my_rank, num_flavors, flavor))) + return ret; + + return 0; +} + +/* Run Tests for pio_spmd.c functions. */ +int main(int argc, char **argv) +{ + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + TARGET_NTASKS, 3, &test_comm))) + ERR(ERR_INIT); + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + printf("Runnings tests for %d flavors\n", num_flavors); + + /* Test code runs on TARGET_NTASKS tasks. 
The left over tasks do + * nothing. */ + if (my_rank < TARGET_NTASKS) + { + /* Run the tests that don't need an iosysid. */ + if ((ret = run_no_iosys_tests(my_rank, test_comm))) + return ret; + + /* Test code with both rearrangers. */ + for (int r = 0; r < NUM_REARRANGERS; r++) + { + /* Test code with 1, 2, 3, and 4 io tasks. */ + for (int numio = 1; numio <= TARGET_NTASKS; numio++) + { + int iosysid; + int rearranger = r ? PIO_REARR_SUBSET : PIO_REARR_BOX; + + if ((ret = PIOc_Init_Intracomm(test_comm, numio, 1, 0, rearranger, + &iosysid))) + return ret; + + /* Run the tests that need an iosysid. */ + if ((ret = run_iosys_tests(numio, iosysid, my_rank, test_comm, + num_flavors, flavor))) + return ret; + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + } /* next numio */ + } /* next rearranger */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + printf("%d %s Finalizing...\n", my_rank, TEST_NAME); + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_shared.c b/src/externals/pio2/tests/cunit/test_shared.c index 3d6eb9cd1ffb..0926e0be9fd9 100644 --- a/src/externals/pio2/tests/cunit/test_shared.c +++ b/src/externals/pio2/tests/cunit/test_shared.c @@ -20,22 +20,21 @@ int test_async2(int my_rank, int num_flavors, int *flavor, MPI_Comm test_comm, int component_count, int num_io_procs, int target_ntasks, char *test_name) { int iosysid[component_count]; /* The ID for the parallel I/O system. */ - int num_procs[component_count + 1]; /* Num procs in each component. */ + int num_procs[component_count]; /* Num procs in each component. */ MPI_Comm io_comm; /* Will get a duplicate of IO communicator. */ MPI_Comm comp_comm[component_count]; /* Will get duplicates of computation communicators. */ int mpierr; /* Return code from MPI functions. */ int ret; /* Return code. */ - num_procs[0] = 1; - num_procs[1] = target_ntasks - 1; + num_procs[0] = target_ntasks - 1; /* Is the current process a computation task? */ int comp_task = my_rank < num_io_procs ? 0 : 1; printf("%d comp_task = %d\n", my_rank, comp_task); /* Initialize the IO system. */ - if ((ret = PIOc_Init_Async(test_comm, num_io_procs, NULL, component_count, - num_procs, NULL, &io_comm, comp_comm, iosysid))) + if ((ret = PIOc_init_async(test_comm, num_io_procs, NULL, component_count, + num_procs, NULL, &io_comm, comp_comm, PIO_REARR_BOX, iosysid))) ERR(ERR_INIT); for (int c = 0; c < component_count; c++) printf("%d iosysid[%d] = %d\n", my_rank, c, iosysid[c]); @@ -103,17 +102,17 @@ int test_no_async2(int my_rank, int num_flavors, int *flavor, MPI_Comm test_comm ioproc_start, PIO_REARR_SUBSET, &iosysid))) return ret; - /* Describe the decomposition. This is a 1-based array, so add 1! */ + /* Describe the decomposition. This is a 0-based array, so don't add 1! */ elements_per_pe = x_dim_len * y_dim_len / target_ntasks; if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) return PIO_ENOMEM; for (int i = 0; i < elements_per_pe; i++) - compdof[i] = my_rank * elements_per_pe + i + 1; + compdof[i] = my_rank * elements_per_pe + i; /* Create the PIO decomposition for this test. 
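      *
      * (Editorial note: the hunk above makes compdof a 0-based map, matching
      * the PIOc_init_decomp() call below, whereas the removed code built a
      * 1-based map for the older PIOc_InitDecomp() interface; task my_rank now
      * owns global offsets my_rank*elements_per_pe through
      * my_rank*elements_per_pe + elements_per_pe - 1.)
      *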
*/ printf("%d Creating decomposition...\n", my_rank); - if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, slice_dimlen, (PIO_Offset)elements_per_pe, - compdof, &ioid, NULL, NULL, NULL))) + if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, 2, slice_dimlen, (PIO_Offset)elements_per_pe, + compdof, &ioid, 0, NULL, NULL))) return ret; free(compdof); diff --git a/src/externals/pio2/tests/cunit/test_spmd.c b/src/externals/pio2/tests/cunit/test_spmd.c index b4947137004f..2c51ce2ccab3 100644 --- a/src/externals/pio2/tests/cunit/test_spmd.c +++ b/src/externals/pio2/tests/cunit/test_spmd.c @@ -1,5 +1,5 @@ /* - * This program tests some internal functions in the library. + * This program tests some internal functions in the PIO library. * * Jim Edwards * Ed Hartnett, 11/23/16 @@ -20,8 +20,6 @@ /* Number of test cases in inner loop of test. */ #define NUM_TEST_CASES 5 -#define TEST_MAX_GATHER_BLOCK_SIZE 32 - /* Test MPI_Alltoallw by having processor i send different amounts of * data to each processor. The first test sends i items to processor * i from all processors. */ @@ -85,8 +83,9 @@ int run_spmd_tests(MPI_Comm test_comm) for (int itest = 0; itest < NUM_TEST_CASES; itest++) { - bool hs = false; - bool isend = false; + rearr_comm_fc_opt_t fc; + fc.hs = false; + fc.isend = false; /* Wait for all tasks. */ MPI_Barrier(test_comm); @@ -99,28 +98,28 @@ int run_spmd_tests(MPI_Comm test_comm) /* Set the parameters different for each test case. */ if (itest == 1) { - hs = true; - isend = true; + fc.hs = true; + fc.isend = true; } else if (itest == 2) { - hs = false; - isend = true; + fc.hs = false; + fc.isend = true; } else if (itest == 3) { - hs = false; - isend = false; + fc.hs = false; + fc.isend = false; } else if (itest == 4) { - hs = true; - isend = false; + fc.hs = true; + fc.isend = false; } /* Run the swapm function. */ if ((ret = pio_swapm(sbuf, sendcounts, sdispls, sendtypes, rbuf, recvcounts, - rdispls, recvtypes, test_comm, hs, isend, msg_cnt))) + rdispls, recvtypes, test_comm, &fc))) return ret; /* Print results. */ @@ -189,59 +188,19 @@ int run_sc_tests(MPI_Comm test_comm) if (gcd_array(SC_ARRAY_LEN, array4) != 1) return ERR_WRONG; - return 0; -} - -/* This test code was recovered from main() in pioc_sc.c. */ -int test_CalcStartandCount() -{ - int ndims = 2; - int gdims[2] = {31, 777602}; - int num_io_procs = 24; - bool converged = false; - PIO_Offset start[ndims], kount[ndims]; - int iorank, numaiotasks = 0; - long int tpsize = 0; - long int psize; - long int pgdims = 1; - int scnt; - - for (int i = 0; i < ndims; i++) - pgdims *= gdims[i]; - - while (!converged) - { - for (iorank = 0; iorank < num_io_procs; iorank++) - { - numaiotasks = CalcStartandCount(PIO_DOUBLE, ndims, gdims, num_io_procs, iorank, - start, kount); - if (iorank < numaiotasks) - printf("iorank %d start %lld %lld count %lld %lld\n", iorank, start[0], - start[1], kount[0], kount[1]); - - if (numaiotasks < 0) - return numaiotasks; - - psize = 1; - scnt = 0; - for (int i = 0; i < ndims; i++) - { - psize *= kount[i]; - scnt += kount[i]; - } - tpsize += psize; - } - - if (tpsize == pgdims) - converged = true; - else - { - printf("Failed to converge %ld %ld %d\n", tpsize, pgdims, num_io_procs); - tpsize = 0; - num_io_procs--; - } - } - + /* Test compute_one_dim. 
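     *
     * Editorial aside restating the checks below: compute_one_dim(), called
     * here as compute_one_dim(length, number of IO tasks, rank, &start,
     * &count), splits a dimension of the given length as evenly as possible
     * across the IO tasks:
     *
     *     length = 4,   4 tasks:  start = rank,       count = 1
     *     length = 400, 4 tasks:  start = rank * 100, count = 100
     *     length = 5,   4 tasks:  count = 1, except the last task also takes
     *                             the leftover element (count = 2)
     *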
*/ + PIO_Offset start, count; + compute_one_dim(4, 4, my_rank, &start, &count); + if (start != my_rank || count != 1) + return ERR_WRONG; + compute_one_dim(400, 4, my_rank, &start, &count); + if (start != my_rank * 100 || count != 100) + return ERR_WRONG; + /* Left over data will go to task 3. */ + compute_one_dim(5, 4, my_rank, &start, &count); + if (start != my_rank || count != (my_rank == 3 ? 2 : 1)) + return ERR_WRONG; + printf("my_rank = %d start = %lld count = %lld\n", my_rank, start, count); return 0; } @@ -264,94 +223,6 @@ int test_lists() return 0; } -/* Test some of the rearranger utility functions. */ -int test_rearranger_opts1() -{ - rearr_comm_fc_opt_t *ro1; - rearr_comm_fc_opt_t *ro2; - rearr_comm_fc_opt_t *ro3; - - if (!(ro1 = calloc(1, sizeof(rearr_comm_fc_opt_t)))) - return ERR_AWFUL; - if (!(ro2 = calloc(1, sizeof(rearr_comm_fc_opt_t)))) - return ERR_AWFUL; - if (!(ro3 = calloc(1, sizeof(rearr_comm_fc_opt_t)))) - return ERR_AWFUL; - - /* This should not work. */ - if (PIOc_set_rearr_opts(42, 1, 1, 0, 0, 0, 0, 0, 0) != PIO_EBADID) - return ERR_WRONG; - - /* ro1 and ro2 are the same. */ - if (!cmp_rearr_comm_fc_opts(ro1, ro2)) - return ERR_WRONG; - - /* Make ro3 different. */ - ro3->enable_hs = 1; - if (cmp_rearr_comm_fc_opts(ro1, ro3)) - return ERR_WRONG; - ro3->enable_hs = 0; - ro3->enable_isend = 1; - if (cmp_rearr_comm_fc_opts(ro1, ro3)) - return ERR_WRONG; - ro3->enable_isend = 0; - ro3->max_pend_req = 1; - if (cmp_rearr_comm_fc_opts(ro1, ro3)) - return ERR_WRONG; - - /* Free resourses. */ - free(ro1); - free(ro2); - free(ro3); - - return 0; -} - -/* Test some of the rearranger utility functions. */ -int test_rearranger_opts2() -{ - iosystem_desc_t my_ios; - iosystem_desc_t *ios = &my_ios; - - /* I'm not sure what the point of this function is... */ - check_and_reset_rearr_opts(ios); - - return 0; -} - -/* Test the compare_offsets() function. */ -int test_compare_offsets() -{ - mapsort m1, m2, m3; - - m1.rfrom = 0; - m1.soffset = 0; - m1.iomap = 0; - m2.rfrom = 0; - m2.soffset = 0; - m2.iomap = 0; - m3.rfrom = 0; - m3.soffset = 0; - m3.iomap = 1; - - /* Return 0 if either or both parameters are null. */ - if (compare_offsets(NULL, &m2)) - return ERR_WRONG; - if (compare_offsets(&m1, NULL)) - return ERR_WRONG; - if (compare_offsets(NULL, NULL)) - return ERR_WRONG; - - /* m1 and m2 are the same. */ - if (compare_offsets(&m1, &m2)) - return ERR_WRONG; - - /* m1 and m3 are different. */ - if (compare_offsets(&m1, &m3) != -1) - return ERR_WRONG; - return 0; -} - /* Test the ceil2() and pair() functions. */ int test_ceil2_pair() { @@ -382,72 +253,85 @@ int test_ceil2_pair() int test_find_mpi_type() { MPI_Datatype mpi_type; + int type_size; int ret; /* This should not work. */ - if (find_mpi_type(PIO_BYTE + 42, &mpi_type) != PIO_EBADTYPE) + if (find_mpi_type(PIO_BYTE + 42, &mpi_type, &type_size) != PIO_EBADTYPE) return ERR_WRONG; /* Try every atomic type. 
*/ - if ((ret = find_mpi_type(PIO_BYTE, &mpi_type))) + if ((ret = find_mpi_type(PIO_BYTE, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_BYTE) + if (mpi_type != MPI_BYTE || type_size != 1) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_CHAR, &mpi_type))) + if ((ret = find_mpi_type(PIO_CHAR, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_CHAR) + if (mpi_type != MPI_CHAR || type_size != 1) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_SHORT, &mpi_type))) + if ((ret = find_mpi_type(PIO_SHORT, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_SHORT) + if (mpi_type != MPI_SHORT || type_size != 2) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_INT, &mpi_type))) + if ((ret = find_mpi_type(PIO_INT, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_INT) + if (mpi_type != MPI_INT || type_size != 4) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_FLOAT, &mpi_type))) + if ((ret = find_mpi_type(PIO_FLOAT, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_FLOAT) + if (mpi_type != MPI_FLOAT || type_size != 4) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_DOUBLE, &mpi_type))) + if ((ret = find_mpi_type(PIO_DOUBLE, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_DOUBLE) + if (mpi_type != MPI_DOUBLE || type_size != 8) return ERR_WRONG; + /* These should also work. */ + if ((ret = find_mpi_type(PIO_INT, &mpi_type, NULL))) + return ret; + if (mpi_type != MPI_INT) + return ERR_WRONG; + if ((ret = find_mpi_type(PIO_INT, NULL, &type_size))) + return ret; + if (type_size != 4) + return ERR_WRONG; + if ((ret = find_mpi_type(PIO_INT, NULL, NULL))) + return ret; + #ifdef _NETCDF4 - if ((ret = find_mpi_type(PIO_UBYTE, &mpi_type))) + if ((ret = find_mpi_type(PIO_UBYTE, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_UNSIGNED_CHAR) + if (mpi_type != MPI_UNSIGNED_CHAR || type_size != 1) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_USHORT, &mpi_type))) + if ((ret = find_mpi_type(PIO_USHORT, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_UNSIGNED_SHORT) + if (mpi_type != MPI_UNSIGNED_SHORT || type_size != 2) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_UINT, &mpi_type))) + if ((ret = find_mpi_type(PIO_UINT, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_UNSIGNED) + if (mpi_type != MPI_UNSIGNED || type_size != 4) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_INT64, &mpi_type))) + if ((ret = find_mpi_type(PIO_INT64, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_LONG_LONG) + if (mpi_type != MPI_LONG_LONG || type_size != 8) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_UINT64, &mpi_type))) + if ((ret = find_mpi_type(PIO_UINT64, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_UNSIGNED_LONG_LONG) + if (mpi_type != MPI_UNSIGNED_LONG_LONG || type_size != 8) return ERR_WRONG; - if ((ret = find_mpi_type(PIO_STRING, &mpi_type))) + if ((ret = find_mpi_type(PIO_STRING, &mpi_type, &type_size))) return ret; - if (mpi_type != MPI_CHAR) + if (mpi_type != MPI_CHAR || type_size != 1) return ERR_WRONG; #endif /* _NETCDF4 */ @@ -461,6 +345,117 @@ int test_misc() /* This should not work. */ if (flush_buffer(TEST_VAL_42, &wmb, 0) != PIO_EBADID) return ERR_WRONG; + + return 0; +} + +/* This test code was recovered from main() in pioc_sc.c. 
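 *
 * (Editorial note: the convergence loop below asks CalcStartandCount() for
 * every IO rank's start/count block, sums the block sizes, and checks that
 * they cover the full 31 x 777602 grid; if they do not, it retries with one
 * fewer IO task until the decomposition covers the grid exactly.)
 *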
*/ +int test_CalcStartandCount() +{ + int ndims = 2; + int gdims[2] = {31, 777602}; + int num_io_procs = 24; + bool converged = false; + PIO_Offset start[ndims], kount[ndims]; + int iorank, numaiotasks = 0; + long int tpsize = 0; + long int psize; + long int pgdims = 1; + int scnt; + int ret; + + for (int i = 0; i < ndims; i++) + pgdims *= gdims[i]; + + while (!converged) + { + for (iorank = 0; iorank < num_io_procs; iorank++) + { + if ((ret = CalcStartandCount(PIO_DOUBLE, ndims, gdims, num_io_procs, iorank, + start, kount, &numaiotasks))) + return ret; + if (iorank < numaiotasks) + printf("iorank %d start %lld %lld count %lld %lld\n", iorank, start[0], + start[1], kount[0], kount[1]); + + if (numaiotasks < 0) + return numaiotasks; + + psize = 1; + scnt = 0; + for (int i = 0; i < ndims; i++) + { + psize *= kount[i]; + scnt += kount[i]; + } + tpsize += psize; + } + + if (tpsize == pgdims) + converged = true; + else + { + printf("Failed to converge %ld %ld %d\n", tpsize, pgdims, num_io_procs); + tpsize = 0; + num_io_procs--; + } + } + + return 0; +} + +/* Test the GDCblocksize() function. */ +int run_GDCblocksize_tests(MPI_Comm test_comm) +{ + { + int arrlen = 1; + PIO_Offset arr_in[1] = {0}; + PIO_Offset blocksize; + + blocksize = GCDblocksize(arrlen, arr_in); + if (blocksize != 1) + return ERR_WRONG; + } + + { + int arrlen = 4; + PIO_Offset arr_in[4] = {0, 1, 2, 3}; + PIO_Offset blocksize; + + blocksize = GCDblocksize(arrlen, arr_in); + if (blocksize != 4) + return ERR_WRONG; + } + + { + int arrlen = 4; + PIO_Offset arr_in[4] = {0, 2, 3, 4}; + PIO_Offset blocksize; + + blocksize = GCDblocksize(arrlen, arr_in); + if (blocksize != 1) + return ERR_WRONG; + } + + { + int arrlen = 4; + PIO_Offset arr_in[4] = {0, 1, 3, 4}; + PIO_Offset blocksize; + + blocksize = GCDblocksize(arrlen, arr_in); + if (blocksize != 1) + return ERR_WRONG; + } + + { + int arrlen = 4; + PIO_Offset arr_in[4] = {0, 1, 2, 4}; + PIO_Offset blocksize; + + blocksize = GCDblocksize(arrlen, arr_in); + if (blocksize != 1) + return ERR_WRONG; + } return 0; } @@ -482,10 +477,20 @@ int main(int argc, char **argv) * nothing. */ if (my_rank < TARGET_NTASKS) { + /* I don't need this iosystem, but it's the only way to get + * the logs to write. */ + int iosysid; + if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, 1, 0, PIO_REARR_BOX, &iosysid))) + return ret; + printf("%d running tests for functions in pioc_sc.c\n", my_rank); if ((ret = run_sc_tests(test_comm))) return ret; + printf("%d running tests for GCDblocksize()\n", my_rank); + if ((ret = run_GDCblocksize_tests(test_comm))) + return ret; + printf("%d running spmd test code\n", my_rank); if ((ret = run_spmd_tests(test_comm))) return ret; @@ -498,18 +503,6 @@ int main(int argc, char **argv) if ((ret = test_lists())) return ret; - printf("%d running rearranger opts tests 1\n", my_rank); - if ((ret = test_rearranger_opts1())) - return ret; - - printf("%d running rearranger opts tests 2\n", my_rank); - if ((ret = test_rearranger_opts2())) - return ret; - - printf("%d running compare_offsets tests\n", my_rank); - if ((ret = test_compare_offsets())) - return ret; - printf("%d running ceil2/pair tests\n", my_rank); if ((ret = test_ceil2_pair())) return ret; @@ -522,6 +515,10 @@ int main(int argc, char **argv) if ((ret = test_misc())) return ret; + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. 
*/ diff --git a/src/externals/pio2/tests/general/ncdf_get_put.F90.in b/src/externals/pio2/tests/general/ncdf_get_put.F90.in index 75ccc75f3e9b..ea8e5933b710 100644 --- a/src/externals/pio2/tests/general/ncdf_get_put.F90.in +++ b/src/externals/pio2/tests/general/ncdf_get_put.F90.in @@ -118,6 +118,67 @@ PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_1datt PIO_TF_AUTO_TEST_SUB_END test_put_get_1datt +PIO_TF_TEMPLATE +PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_0dvar + Implicit none + type(file_desc_t) :: pio_file + character(len=PIO_TF_MAX_STR_LEN) :: filename + type(var_desc_t) :: pio_var, pio_cvar + PIO_TF_FC_DATA_TYPE, dimension(1) :: pval, gval + CHARACTER(len=1) :: pcval, gcval + integer, dimension(:), allocatable :: iotypes + character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs + integer :: num_iotypes + integer :: i, ret + + pval = pio_tf_world_sz_ + pcval = "D" + num_iotypes = 0 + call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes) + filename = "test_pio_ncdf_get_put.testfile" + do i=1,num_iotypes + PIO_TF_LOG(0,*) "Testing type :", iotype_descs(i) + ret = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER) + PIO_TF_CHECK_ERR(ret, "Failed to open:" // trim(filename)) + + ! Since file is just created no need to enter redef + ret = PIO_def_var(pio_file, 'dummy_scalar_var_put_val', PIO_TF_DATA_TYPE, pio_var) + PIO_TF_CHECK_ERR(ret, "Failed to define scalar var:" // trim(filename)) + + ret = PIO_def_var(pio_file, 'dummy_scalar_var_put_cval', PIO_char, pio_cvar) + PIO_TF_CHECK_ERR(ret, "Failed to define scalar char var:" // trim(filename)) + + ret = PIO_enddef(pio_file) + PIO_TF_CHECK_ERR(ret, "Failed to enddef:" // trim(filename)) + + ret = PIO_put_var(pio_file, pio_var, pval); + PIO_TF_CHECK_ERR(ret, "Failed to put scalar var:" // trim(filename)) + + ret = PIO_put_var(pio_file, pio_cvar, pcval); + PIO_TF_CHECK_ERR(ret, "Failed to put scalar char var:" // trim(filename)) + + call PIO_syncfile(pio_file) + + ret = PIO_get_var(pio_file, pio_var, gval); + PIO_TF_CHECK_ERR(ret, "Failed to get scalar var:" // trim(filename)) + + PIO_TF_CHECK_VAL((gval, pval), "Got wrong value") + + ret = PIO_get_var(pio_file, pio_cvar, gcval); + PIO_TF_CHECK_ERR(ret, "Failed to get scalar char var:" // trim(filename)) + + PIO_TF_CHECK_VAL((gcval, pcval), "Got wrong value") + + call PIO_closefile(pio_file) + call PIO_deletefile(pio_tf_iosystem_, filename); + end do + if(allocated(iotypes)) then + deallocate(iotypes) + deallocate(iotype_descs) + end if + +PIO_TF_AUTO_TEST_SUB_END test_put_get_0dvar + PIO_TF_TEMPLATE PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_1dvar Implicit none @@ -184,6 +245,71 @@ PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_1dvar PIO_TF_AUTO_TEST_SUB_END test_put_get_1dvar +! Write out a 1d var slice from a 2d var +PIO_TF_TEMPLATE +PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_1dvar_slice + Implicit none + type(file_desc_t) :: pio_file + character(len=PIO_TF_MAX_STR_LEN) :: filename + type(var_desc_t) :: pio_var, pio_cvar + integer :: pio_dim + integer, parameter :: MAX_ROW_DIM_LEN = 100 + PIO_TF_FC_DATA_TYPE, dimension(MAX_ROW_DIM_LEN) :: gval, exp_val + integer, parameter :: MAX_COL_DIM_LEN = 4 + ! 
Only COL_WRITE_DIM of MAX_COL_DIM_LEN columns in pval is written out + integer, parameter :: COL_WRITE_DIM = 2 + PIO_TF_FC_DATA_TYPE, dimension(MAX_ROW_DIM_LEN, MAX_COL_DIM_LEN) :: pval + integer, dimension(:) :: start(4), count(4) + integer, dimension(:), allocatable :: iotypes + character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs + integer :: num_iotypes + integer :: i, ret + + pval = -1 + pval(:,COL_WRITE_DIM) = pio_tf_world_sz_ + exp_val = pio_tf_world_sz_ + start = 0 + count = 0 + start(1) = 1 + count(1) = MAX_ROW_DIM_LEN + num_iotypes = 0 + call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes) + filename = "test_pio_ncdf_get_put_slice.testfile" + do i=1,num_iotypes + PIO_TF_LOG(0,*) "Testing type :", iotype_descs(i) + ret = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER) + PIO_TF_CHECK_ERR(ret, "Failed to open:" // trim(filename)) + + ! Since file is just created no need to enter redef + ret = PIO_def_dim(pio_file, 'dummy_dim_put_val', MAX_ROW_DIM_LEN, pio_dim) + PIO_TF_CHECK_ERR(ret, "Failed to define dim:" // trim(filename)) + + ret = PIO_def_var(pio_file, 'dummy_var_put_val', PIO_TF_DATA_TYPE, (/pio_dim/), pio_var) + PIO_TF_CHECK_ERR(ret, "Failed to define var:" // trim(filename)) + + ret = PIO_enddef(pio_file) + PIO_TF_CHECK_ERR(ret, "Failed to enddef:" // trim(filename)) + + ret = PIO_put_var(pio_file, pio_var, start, count, pval(:,COL_WRITE_DIM)); + PIO_TF_CHECK_ERR(ret, "Failed to put var:" // trim(filename)) + + call PIO_syncfile(pio_file) + + ret = PIO_get_var(pio_file, pio_var, gval); + PIO_TF_CHECK_ERR(ret, "Failed to get var:" // trim(filename)) + + PIO_TF_CHECK_VAL((gval, exp_val), "Got wrong value") + + call PIO_closefile(pio_file) + call PIO_deletefile(pio_tf_iosystem_, filename); + end do + if(allocated(iotypes)) then + deallocate(iotypes) + deallocate(iotype_descs) + end if + +PIO_TF_AUTO_TEST_SUB_END test_put_get_1dvar_slice + PIO_TF_TEMPLATE PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_1dvar_4parts Implicit none @@ -267,3 +393,177 @@ PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_1dvar_4parts PIO_TF_AUTO_TEST_SUB_END test_put_get_1dvar_4parts +! Write out 2d/3d/4d vars, one time slice at a time +PIO_TF_TEMPLATE +PIO_TF_AUTO_TEST_SUB_BEGIN test_put_get_md2mdplus1_var + Implicit none + type(file_desc_t) :: pio_file + character(len=PIO_TF_MAX_STR_LEN) :: filename + integer, parameter :: MAX_DIMS = 4 + integer, parameter :: MAX_ROWS = 10 + integer, parameter :: MAX_COLS = 10 + integer, parameter :: MAX_LEVS = 3 + integer, parameter :: MAX_TIMES = 3 + integer, dimension(MAX_DIMS) :: pio_dims + type(var_desc_t) :: pio_2dvar, pio_3dvar, pio_4dvar + PIO_TF_FC_DATA_TYPE, dimension(MAX_ROWS,MAX_TIMES) :: gval_2d, exp_val_2d + PIO_TF_FC_DATA_TYPE, dimension(MAX_ROWS,MAX_COLS,MAX_TIMES) :: gval_3d, exp_val_3d + PIO_TF_FC_DATA_TYPE, dimension(MAX_ROWS,MAX_COLS,MAX_LEVS,MAX_TIMES) ::& + gval_4d, exp_val_4d + ! Only one slice is written out at a time + ! pval_1d is a 1d slice of gval_2d ... 
+ PIO_TF_FC_DATA_TYPE, dimension(MAX_ROWS) :: pval_1d + PIO_TF_FC_DATA_TYPE, dimension(MAX_ROWS, MAX_COLS) :: pval_2d + PIO_TF_FC_DATA_TYPE, dimension(MAX_ROWS, MAX_COLS, MAX_LEVS) :: pval_3d + integer, dimension(:) :: start(MAX_DIMS), count(MAX_DIMS) + integer :: pval_start + integer, dimension(:), allocatable :: iotypes + character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs + integer :: num_iotypes + integer :: i, k, l, m, n, tstep, ret + + num_iotypes = 0 + call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes) + filename = "test_pio_ncdf_get_put_md_slice.testfile" + do i=1,num_iotypes + PIO_TF_LOG(0,*) "Testing type :", iotype_descs(i) + ret = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER) + PIO_TF_CHECK_ERR(ret, "Failed to open:" // trim(filename)) + + ! Since file is just created no need to enter redef + ret = PIO_def_dim(pio_file, 'nrows', MAX_ROWS, pio_dims(1)) + PIO_TF_CHECK_ERR(ret, "Failed to define dim:" // trim(filename)) + + ret = PIO_def_dim(pio_file, 'ncols', MAX_COLS, pio_dims(2)) + PIO_TF_CHECK_ERR(ret, "Failed to define dim:" // trim(filename)) + + ret = PIO_def_dim(pio_file, 'nlevs', MAX_LEVS, pio_dims(3)) + PIO_TF_CHECK_ERR(ret, "Failed to define dim:" // trim(filename)) + + ret = PIO_def_dim(pio_file, 'timesteps', MAX_TIMES, pio_dims(4)) + PIO_TF_CHECK_ERR(ret, "Failed to define dim:" // trim(filename)) + + ret = PIO_def_var(pio_file, '2d_val', PIO_TF_DATA_TYPE,& + (/pio_dims(1),pio_dims(4)/), pio_2dvar) + PIO_TF_CHECK_ERR(ret, "Failed to define var:" // trim(filename)) + + ret = PIO_def_var(pio_file, '3d_val', PIO_TF_DATA_TYPE,& + (/pio_dims(1),pio_dims(2),pio_dims(4)/), pio_3dvar) + PIO_TF_CHECK_ERR(ret, "Failed to define var:" // trim(filename)) + + ret = PIO_def_var(pio_file, '4d_val', PIO_TF_DATA_TYPE,& + pio_dims, pio_4dvar) + PIO_TF_CHECK_ERR(ret, "Failed to define var:" // trim(filename)) + + ret = PIO_enddef(pio_file) + PIO_TF_CHECK_ERR(ret, "Failed to enddef:" // trim(filename)) + + ! Put vals are for each timestep & + ! expected vals are combined for all timesteps + do k=1,MAX_ROWS + pval_1d(k) = k + end do + do tstep=1,MAX_TIMES + pval_start = (tstep - 1) * MAX_ROWS + exp_val_2d(:,tstep) = pval_1d + pval_start + end do + do l=1,MAX_COLS + do k=1,MAX_ROWS + pval_2d(k,l) = (l - 1)*MAX_ROWS + k + end do + end do + do tstep=1,MAX_TIMES + do l=1,MAX_COLS + do k=1,MAX_ROWS + pval_start = (tstep - 1) * (MAX_ROWS * MAX_COLS) + exp_val_3d(:,:,tstep) = pval_2d + pval_start + end do + end do + end do + do m=1,MAX_LEVS + do l=1,MAX_COLS + do k=1,MAX_ROWS + pval_3d(k,l,m) = ((m-1)*(MAX_COLS*MAX_ROWS)+(l - 1)*MAX_ROWS + k) + end do + end do + end do + do tstep=1,MAX_TIMES + do m=1,MAX_LEVS + do l=1,MAX_COLS + do k=1,MAX_ROWS + pval_start = (tstep - 1) * (MAX_ROWS * MAX_COLS * MAX_LEVS) + exp_val_4d(:,:,:,tstep) = pval_3d + pval_start + end do + end do + end do + end do + ! 
Put 2d/3d/4d vals, one timestep at a time + do tstep=1,MAX_TIMES + start = 0 + count = 0 + + start(1) = 1 + count(1) = MAX_ROWS + start(2) = tstep + count(2) = 1 + pval_start = (tstep - 1) * MAX_ROWS + ret = PIO_put_var(pio_file, pio_2dvar, start, count,& + pval_1d(:)+pval_start) + PIO_TF_CHECK_ERR(ret, "Failed to put 2d var:" // trim(filename)) + + start(1) = 1 + count(1) = MAX_ROWS + start(2) = 1 + count(2) = MAX_COLS + start(3) = tstep + count(3) = 1 + pval_start = (tstep - 1) * (MAX_ROWS * MAX_COLS) + ret = PIO_put_var(pio_file, pio_3dvar, start, count,& + pval_2d(:,:)+pval_start) + PIO_TF_CHECK_ERR(ret, "Failed to put 3d var:" // trim(filename)) + + start(1) = 1 + count(1) = MAX_ROWS + start(2) = 1 + count(2) = MAX_COLS + start(3) = 1 + count(3) = MAX_LEVS + start(4) = tstep + count(4) = 1 + pval_start = (tstep - 1) * (MAX_ROWS * MAX_COLS * MAX_LEVS) + ret = PIO_put_var(pio_file, pio_4dvar, start, count,& + pval_3d(:,:,:)+pval_start) + PIO_TF_CHECK_ERR(ret, "Failed to put 4d var:" // trim(filename)) + end do + + call PIO_syncfile(pio_file) + + ret = PIO_get_var(pio_file, pio_2dvar, gval_2d) + PIO_TF_CHECK_ERR(ret, "Failed to get 2d var:" // trim(filename)) + + PIO_TF_CHECK_VAL((gval_2d, exp_val_2d), "Got wrong value (2d var)") + + ret = PIO_get_var(pio_file, pio_3dvar, gval_3d) + PIO_TF_CHECK_ERR(ret, "Failed to get 3d var:" // trim(filename)) + + PIO_TF_CHECK_VAL((gval_3d, exp_val_3d), "Got wrong value (3d var)") + + ret = PIO_get_var(pio_file, pio_4dvar, gval_4d) + PIO_TF_CHECK_ERR(ret, "Failed to get 4d var:" // trim(filename)) + + ! Special code to handle 4d vals is required since the framework + ! currently does not support comparing 4d arrays + do tstep=1,MAX_TIMES + PIO_TF_CHECK_VAL((gval_4d(:,:,:,tstep), exp_val_4d(:,:,:,tstep)), "Got wrong value (4d var)") + end do + + call PIO_closefile(pio_file) + call PIO_deletefile(pio_tf_iosystem_, filename); + end do + if(allocated(iotypes)) then + deallocate(iotypes) + deallocate(iotype_descs) + end if + +PIO_TF_AUTO_TEST_SUB_END test_put_get_md2mdplus1_var + diff --git a/src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in b/src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in index e92b483c3c02..e75fdce61de4 100644 --- a/src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in +++ b/src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in @@ -225,3 +225,196 @@ PIO_TF_AUTO_TEST_SUB_BEGIN nc_write_read_4d_col_decomp deallocate(rbuf) deallocate(wbuf) PIO_TF_AUTO_TEST_SUB_END nc_write_read_4d_col_decomp + +! Using a 3d decomp for writing out a 3d and a 4d var +! Write with one decomp (to force rearrangement) and read with another (no +! rearrangement) +PIO_TF_TEMPLATE +PIO_TF_AUTO_TEST_SUB_BEGIN nc_reuse_3d_decomp + implicit none + integer, parameter :: NDIMS = 4 + integer, parameter :: NFRAMES = 3 + type(var_desc_t) :: pio_var3d, pio_var4d + type(file_desc_t) :: pio_file + character(len=PIO_TF_MAX_STR_LEN) :: filename + type(io_desc_t) :: wr_iodesc, rd_iodesc + integer, dimension(:), allocatable :: compdof + integer, dimension(NDIMS) :: start, count + PIO_TF_FC_DATA_TYPE, dimension(:,:,:,:), allocatable :: rbuf4d, wbuf4d, exp_val4d + PIO_TF_FC_DATA_TYPE, dimension(:,:,:), allocatable :: rbuf3d, wbuf3d, exp_val3d + integer, dimension(NDIMS-1) :: dims + integer, dimension(NDIMS) :: pio_dims + integer :: i, j, k, tmp_idx, ierr, lsz, nrows, ncols, nhgts + integer(kind=pio_offset_kind) :: f + ! 
iotypes = valid io types + integer, dimension(:), allocatable :: iotypes + character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs + integer :: num_iotypes + + ! Set the decomposition for writing data - forcing rearrangement + call get_3d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .true.) + nrows = count(1) + ncols = count(2) + nhgts = count(3) + + ! Initialize the 4d var + allocate(wbuf4d(nrows, ncols, nhgts, NFRAMES)) + do f=1,NFRAMES + do k=1,nhgts + do j=1,ncols + do i=1,nrows + wbuf4d(i,j,k,f) = (start(3) - 1 + k - 1) * (dims(1) * dims(2)) +& + (start(2) - 1 + j - 1) * dims(1) + i + wbuf4d(i,j,k,f) = wbuf4d(i,j,k,f) + (f - 1) * (dims(1) * dims(2) * dims(3)) + end do + end do + end do + end do + allocate(compdof(nrows * ncols * nhgts)) + do k=1,nhgts + do j=1,ncols + do i=1,nrows + tmp_idx = (k - 1) * (ncols * nrows) + (j - 1) * nrows + i + compdof(tmp_idx) = wbuf4d(i,j,k,1) + end do + end do + end do + ! Initialize the 3d var + allocate(wbuf3d(nrows, ncols, nhgts)) + do k=1,nhgts + do j=1,ncols + do i=1,nrows + wbuf3d(i,j,k) = (start(3) - 1 + k - 1) * (dims(1) * dims(2)) +& + (start(2) - 1 + j - 1) * dims(1) + i + end do + end do + end do + + call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, wr_iodesc) + deallocate(compdof) + + ! Set the decomposition for reading data - different from the write decomp + call get_3d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .false.) + nrows = count(1) + ncols = count(2) + nhgts = count(3) + + allocate(rbuf4d(nrows, ncols, nhgts, NFRAMES)) + rbuf4d = 0 + ! Expected val for 4d var + allocate(exp_val4d(nrows, ncols, nhgts, NFRAMES)) + do f=1,NFRAMES + do k=1,nhgts + do j=1,ncols + do i=1,nrows + exp_val4d(i,j,k,f) = (start(3) - 1 + k - 1) * (dims(1) * dims(2)) +& + (start(2) - 1 + j - 1) * dims(1) + i + exp_val4d(i,j,k,f) = exp_val4d(i,j,k,f)+(f - 1) * (dims(1) * dims(2) * dims(3)) + end do + end do + end do + end do + allocate(compdof(nrows * ncols * nhgts)) + do k=1,nhgts + do j=1,ncols + do i=1,nrows + tmp_idx = (k - 1) * (ncols * nrows) + (j - 1) * nrows + i + compdof(tmp_idx) = exp_val4d(i,j,k,1) + end do + end do + end do + + allocate(rbuf3d(nrows, ncols, nhgts)) + rbuf3d = 0 + ! 
Expected val for 3d var + allocate(exp_val3d(nrows, ncols, nhgts)) + do k=1,nhgts + do j=1,ncols + do i=1,nrows + exp_val3d(i,j,k) = (start(3) - 1 + k - 1) * (dims(1) * dims(2)) +& + (start(2) - 1 + j - 1) * dims(1) + i + end do + end do + end do + + call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, rd_iodesc) + deallocate(compdof) + + num_iotypes = 0 + call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes) + filename = "test_pio_decomp_simple_tests.testfile" + do i=1,num_iotypes + PIO_TF_LOG(0,*) "Testing : PIO_TF_DATA_TYPE : ", iotype_descs(i) + ierr = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER) + PIO_TF_CHECK_ERR(ierr, "Could not create file " // trim(filename)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_row', dims(1), pio_dims(1)) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_col', dims(2), pio_dims(2)) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_hgt', dims(3), pio_dims(3)) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_time', pio_unlimited, pio_dims(4)) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename)) + + ierr = PIO_def_var(pio_file, 'PIO_TF_test_3d_var', PIO_TF_DATA_TYPE, pio_dims(1:3), pio_var3d) + PIO_TF_CHECK_ERR(ierr, "Failed to define a 3d var : " // trim(filename)) + + ierr = PIO_def_var(pio_file, 'PIO_TF_test_4d_var', PIO_TF_DATA_TYPE, pio_dims, pio_var4d) + PIO_TF_CHECK_ERR(ierr, "Failed to define a 4d var : " // trim(filename)) + + ierr = PIO_enddef(pio_file) + PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename)) + + call PIO_write_darray(pio_file, pio_var3d, wr_iodesc, wbuf3d, ierr) + PIO_TF_CHECK_ERR(ierr, "Failed to write 3d darray : " // trim(filename)) + + do f=1,NFRAMES + call PIO_setframe(pio_file, pio_var4d, f) + ! 
Write the current frame + call PIO_write_darray(pio_file, pio_var4d, wr_iodesc, wbuf4d(:,:,:,f), ierr) + PIO_TF_CHECK_ERR(ierr, "Failed to write 4d darray : " // trim(filename)) + end do + call PIO_syncfile(pio_file) + + rbuf4d = 0 + rbuf3d = 0 + + call PIO_read_darray(pio_file, pio_var3d, rd_iodesc, rbuf3d, ierr) + PIO_TF_CHECK_ERR(ierr, "Failed to read 3d darray : " // trim(filename)) + + do f=1,NFRAMES + call PIO_setframe(pio_file, pio_var4d, f) + call PIO_read_darray(pio_file, pio_var4d, rd_iodesc, rbuf4d(:,:,:,f), ierr) + PIO_TF_CHECK_ERR(ierr, "Failed to read 4d darray : " // trim(filename)) + end do + + do f=1,NFRAMES + PIO_TF_CHECK_VAL((rbuf4d(:,:,:,f), exp_val4d(:,:,:,f)), "Got wrong 4d val, frame=", f) + end do + PIO_TF_CHECK_VAL((rbuf3d, exp_val3d), "Got wrong 3dd val") + + call PIO_closefile(pio_file) + + call PIO_deletefile(pio_tf_iosystem_, filename); + end do + + if(allocated(iotypes)) then + deallocate(iotypes) + deallocate(iotype_descs) + end if + + call PIO_freedecomp(pio_tf_iosystem_, rd_iodesc) + call PIO_freedecomp(pio_tf_iosystem_, wr_iodesc) + + deallocate(exp_val3d) + deallocate(rbuf3d) + deallocate(wbuf3d) + + deallocate(exp_val4d) + deallocate(rbuf4d) + deallocate(wbuf4d) +PIO_TF_AUTO_TEST_SUB_END nc_reuse_3d_decomp diff --git a/src/externals/pio2/tests/general/util/pio_tutil.F90 b/src/externals/pio2/tests/general/util/pio_tutil.F90 index 43c0b634b4a1..e4a076f3a101 100644 --- a/src/externals/pio2/tests/general/util/pio_tutil.F90 +++ b/src/externals/pio2/tests/general/util/pio_tutil.F90 @@ -129,7 +129,7 @@ SUBROUTINE PIO_TF_Init_(rearr) - pio_tf_log_level_ = 0 + pio_tf_log_level_ = 3 pio_tf_num_aggregators_ = 0 pio_tf_num_io_tasks_ = 0 pio_tf_stride_ = 1 From fd8e01652f64572e54d2b89149411f72ff70987e Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 1 May 2017 12:38:29 -0600 Subject: [PATCH 181/219] updates example 1 --- src/externals/pio2/examples/c/example1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/externals/pio2/examples/c/example1.c b/src/externals/pio2/examples/c/example1.c index 1fcd8ab222a2..ad650f0b1faf 100644 --- a/src/externals/pio2/examples/c/example1.c +++ b/src/externals/pio2/examples/c/example1.c @@ -285,7 +285,7 @@ int check_file(int ntasks, char *filename) { /* Initialize MPI. */ if ((ret = MPI_Init(&argc, &argv))) MPIERR(ret); - if ((ret = MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN))) + if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN))) MPIERR(ret); /* Learn my rank and the total number of processors. */ From f719bac36dc4cc517975dcda0ee0c2fb41541a0c Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sun, 16 Apr 2017 21:38:58 -0600 Subject: [PATCH 182/219] added :Faxa_nhx:Faxa_noy to Faxa fields if ndep_inparm contained ndep_list = 'nhx', 'noy' --- .../namelist_definition_drv_flds.xml | 13 ++ src/drivers/mct/shr/seq_flds_mod.F90 | 40 ++++-- src/drivers/mct/shr/shr_ndep_mod.F90 | 117 ++++++++++++++++++ 3 files changed, 161 insertions(+), 9 deletions(-) create mode 100644 src/drivers/mct/shr/shr_ndep_mod.F90 diff --git a/src/drivers/mct/cime_config/namelist_definition_drv_flds.xml b/src/drivers/mct/cime_config/namelist_definition_drv_flds.xml index 1a9cc9376b8e..08847103afde 100644 --- a/src/drivers/mct/cime_config/namelist_definition_drv_flds.xml +++ b/src/drivers/mct/cime_config/namelist_definition_drv_flds.xml @@ -86,6 +86,19 @@
+ + + + + + char(2) + nitrogen deposition + ndep_inparm + + List of nitrogen deposition fluxes to be sent from CAM to surfae models. + + + diff --git a/src/drivers/mct/shr/seq_flds_mod.F90 b/src/drivers/mct/shr/seq_flds_mod.F90 index d00f84f87b19..1a3e703de540 100644 --- a/src/drivers/mct/shr/seq_flds_mod.F90 +++ b/src/drivers/mct/shr/seq_flds_mod.F90 @@ -121,13 +121,14 @@ module seq_flds_mod ! variables CCSM_VOC, CCSM_BGC and GLC_NEC. !==================================================================== - use shr_kind_mod, only : CX => shr_kind_CX, CXX => shr_kind_CXX - use shr_sys_mod, only : shr_sys_abort - use seq_drydep_mod, only : seq_drydep_init, seq_drydep_readnl, lnd_drydep - use seq_comm_mct, only : seq_comm_iamroot, seq_comm_setptrs, logunit - use shr_megan_mod, only : shr_megan_readnl, shr_megan_mechcomps_n - use shr_fire_emis_mod, only : shr_fire_emis_readnl, shr_fire_emis_mechcomps_n, shr_fire_emis_ztop_token - use shr_carma_mod, only : shr_carma_readnl + use shr_kind_mod , only : CX => shr_kind_CX, CXX => shr_kind_CXX + use shr_sys_mod , only : shr_sys_abort + use seq_comm_mct , only : seq_comm_iamroot, seq_comm_setptrs, logunit + use seq_drydep_mod , only : seq_drydep_init, seq_drydep_readnl, lnd_drydep + use shr_megan_mod , only : shr_megan_readnl, shr_megan_mechcomps_n + use shr_fire_emis_mod , only : shr_fire_emis_readnl, shr_fire_emis_mechcomps_n, shr_fire_emis_ztop_token + use shr_carma_mod , only : shr_carma_readnl + use shr_ndep_mod , only : shr_ndep_readnl implicit none public @@ -143,8 +144,10 @@ module seq_flds_mod character(len=CXX) :: megan_voc_fields ! List of MEGAN VOC emission fields character(len=CXX) :: fire_emis_fields ! List of fire emission fields character(len=CX) :: carma_fields ! List of CARMA fields from lnd->atm + character(len=CX) :: ndep_fields ! List of nitrogen deposition fields from atm->lnd/ocn integer :: ice_ncat ! number of sea ice thickness categories logical :: seq_flds_i2o_per_cat! .true. if select per ice thickness category fields are passed from ice to ocean + logical :: add_ndep_fields ! .true. => add ndep fields !---------------------------------------------------------------------------- ! metadata @@ -3126,7 +3129,6 @@ subroutine seq_flds_set(nmlfile, ID, infodata) units = 'm' call metadata_set(shr_fire_emis_ztop_token, longname, stdname, units) - endif !----------------------------------------------------------------------------- @@ -3146,11 +3148,31 @@ subroutine seq_flds_set(nmlfile, ID, infodata) longname = 'dry deposition velocity' stdname = 'drydep_vel' units = 'cm/sec' - call metadata_set(seq_drydep_fields, longname, stdname, units) + call metadata_set(seq_drydep_fields, longname, stdname, units) endif call seq_drydep_init( ) + !----------------------------------------------------------------------------- + ! Nitrogen Deposition fields + ! First read namelist and figure out the ndepdep field list to pass + ! Then check if file exists and if not, n_drydep will be zero + ! 
Then add nitrogen deposition fields to atm export, lnd import and ocn import + !----------------------------------------------------------------------------- + + call shr_ndep_readnl(nlfilename="drv_flds_in", ID=ID, ndep_fields=ndep_fields, add_ndep_fields=add_ndep_fields) + if (add_ndep_fields) then + call seq_flds_add(a2x_fluxes, ndep_fields) + call seq_flds_add(x2l_fluxes, ndep_fields) + call seq_flds_add(x2o_fluxes, ndep_fields) + + longname = 'nitrongen deposition flux' + stdname = 'ndep' + units = 'kg/m2/sec' !TODO (mv) - check this + + call metadata_set(ndep_fields, longname, stdname, units) + end if + !---------------------------------------------------------------------------- ! state + flux fields !---------------------------------------------------------------------------- diff --git a/src/drivers/mct/shr/shr_ndep_mod.F90 b/src/drivers/mct/shr/shr_ndep_mod.F90 new file mode 100644 index 000000000000..64cadf107f35 --- /dev/null +++ b/src/drivers/mct/shr/shr_ndep_mod.F90 @@ -0,0 +1,117 @@ +module shr_ndep_mod + + !======================================================================== + ! Module for handling nitrogen depostion of tracers. + ! This module is shared by land and atmosphere models for the computations of + ! dry deposition of tracers + !======================================================================== + + !USES: + use shr_sys_mod, only : shr_sys_abort + use shr_log_mod, only : s_loglev => shr_log_Level + use shr_kind_mod, only : r8 => shr_kind_r8, CS => SHR_KIND_CS, CX => SHR_KIND_CX + + implicit none + save + + private + + ! !PUBLIC MEMBER FUNCTIONS + public :: shr_ndep_readnl ! Read namelist + !==================================================================================== + +CONTAINS + + !==================================================================================== + subroutine shr_ndep_readnl(NLFilename, ID, ndep_fields, add_ndep_fields) + + !======================================================================== + ! reads ndep_inparm namelist and sets up driver list of fields for + ! atmosphere -> land and atmosphere -> ocn communications. + !======================================================================== + + use shr_file_mod , only : shr_file_getUnit, shr_file_freeUnit + use shr_log_mod , only : s_logunit => shr_log_Unit + use seq_comm_mct , only : seq_comm_iamroot, seq_comm_setptrs + use shr_mpi_mod , only : shr_mpi_bcast + use shr_nl_mod , only : shr_nl_find_group_name + implicit none + + character(len=*), intent(in) :: NLFilename ! Namelist filename + integer , intent(in) :: ID ! seq_comm ID + character(len=*), intent(out) :: ndep_fields + logical , intent(out) :: add_ndep_fields + + !----- local ----- + integer :: i ! Indices + integer :: unitn ! namelist unit number + integer :: ierr ! error code + logical :: exists ! if file exists or not + character(len=8) :: token ! dry dep field name to add + integer :: mpicom ! MPI communicator + + integer, parameter :: maxspc = 100 ! Maximum number of species + character(len=32) :: ndep_list(maxspc) = '' ! List of ndep species + + !----- formats ----- + character(*),parameter :: subName = '(shr_ndep_read) ' + character(*),parameter :: F00 = "('(shr_ndep_read) ',8a)" + character(*),parameter :: FI1 = "('(shr_ndep_init) ',a,I2)" + + namelist /ndep_inparm/ ndep_list + + !----------------------------------------------------------------------------- + ! Read namelist and figure out the ndep field list to pass + ! 
First check if file exists and if not, n_ndep will be zero + !----------------------------------------------------------------------------- + + !--- Open and read namelist --- + if ( len_trim(NLFilename) == 0 ) then + call shr_sys_abort( subName//'ERROR: nlfilename not set' ) + end if + call seq_comm_setptrs(ID,mpicom=mpicom) + if (seq_comm_iamroot(ID)) then + inquire( file=trim(NLFileName), exist=exists) + if ( exists ) then + unitn = shr_file_getUnit() + open( unitn, file=trim(NLFilename), status='old' ) + if ( s_loglev > 0 ) then + write(s_logunit,F00) 'Read in ndep_inparm namelist from: ', trim(NLFilename) + end if + call shr_nl_find_group_name(unitn, 'ndep_inparm', ierr) + if (ierr == 0) then + ierr = 1 + do while ( ierr /= 0 ) + read(unitn, ndep_inparm, iostat=ierr) + if (ierr < 0) then + call shr_sys_abort( subName//'ERROR: encountered end-of-file on namelist read' ) + endif + end do + close( unitn ) + else + write(s_logunit,*) 'shr_ndep_readnl: no ndep_inparm namelist found in ',NLFilename + endif + call shr_file_freeUnit( unitn ) + end if + end if + call shr_mpi_bcast( ndep_list, mpicom ) + + ndep_fields = ' ' + if (len_trim(ndep_list(1)) == 0) then + add_ndep_fields = .false. + else + ! Loop over species to fill list of fields to communicate for ndep + add_ndep_fields = .true. + do i=1,maxspc + if ( len_trim(ndep_list(i))==0 ) exit + if ( i == 1 ) then + ndep_fields = 'Faxa_' // trim(ndep_list(i)) + else + ndep_fields = trim(ndep_fields)//':'//'Faxa_' // trim(ndep_list(i)) + endif + enddo + end if + + end subroutine shr_ndep_readnl + +end module shr_ndep_mod From ceee27276917ed22a502bfd2610e6db161acca1c Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sat, 29 Apr 2017 14:13:58 -0600 Subject: [PATCH 183/219] put in correct units --- src/drivers/mct/shr/seq_flds_mod.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/drivers/mct/shr/seq_flds_mod.F90 b/src/drivers/mct/shr/seq_flds_mod.F90 index 1a3e703de540..4c9de8fd468b 100644 --- a/src/drivers/mct/shr/seq_flds_mod.F90 +++ b/src/drivers/mct/shr/seq_flds_mod.F90 @@ -3166,9 +3166,9 @@ subroutine seq_flds_set(nmlfile, ID, infodata) call seq_flds_add(x2l_fluxes, ndep_fields) call seq_flds_add(x2o_fluxes, ndep_fields) - longname = 'nitrongen deposition flux' - stdname = 'ndep' - units = 'kg/m2/sec' !TODO (mv) - check this + longname = 'nitrogen deposition flux' + stdname = 'nitrogen_deposition' + units = 'kg(N)/m2/sec' call metadata_set(ndep_fields, longname, stdname, units) end if From b091fb2c97966854d55e16d34806732bf259840e Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 1 May 2017 14:53:50 -0600 Subject: [PATCH 184/219] Update queue selection to take walltime into account Adds concept of strict walltime --- config/acme/machines/config_batch.xml | 10 +-- config/xml_schemas/config_batch.xsd | 3 +- scripts/create_test | 2 +- scripts/lib/CIME/XML/env_batch.py | 94 +++++++++++++++++++-------- 4 files changed, 75 insertions(+), 34 deletions(-) diff --git a/config/acme/machines/config_batch.xml b/config/acme/machines/config_batch.xml index aa9abfb4e3a5..50e909b52eef 100644 --- a/config/acme/machines/config_batch.xml +++ b/config/acme/machines/config_batch.xml @@ -158,7 +158,7 @@ -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - shared + shared batch @@ -177,8 +177,8 @@ + debug regular - debug @@ -198,8 +198,8 @@ --constraint=haswell + debug regular - debug @@ -208,8 +208,8 @@ --constraint=knl,quad,cache + debug regular - debug @@ -329,8 +329,8 @@ -l nodes={{ num_nodes }} + debug batch - debug 
diff --git a/config/xml_schemas/config_batch.xsd b/config/xml_schemas/config_batch.xsd index da8e028a5b38..ad810c028708 100644 --- a/config/xml_schemas/config_batch.xsd +++ b/config/xml_schemas/config_batch.xsd @@ -69,7 +69,7 @@ @@ -130,6 +130,7 @@ + diff --git a/scripts/create_test b/scripts/create_test index 3ed88253cceb..f55dfe47131d 100755 --- a/scripts/create_test +++ b/scripts/create_test @@ -420,7 +420,7 @@ def single_submit_impl(machine_name, test_id, proc_pool, project, args, job_cost else: wall_time_bab = wall_time - queue = env_batch.select_best_queue(proc_pool) + queue = env_batch.select_best_queue(proc_pool, wall_time_bab) wall_time_max_bab = env_batch.get_max_walltime(queue) if wall_time_max_bab is not None: wall_time_max = convert_to_seconds(wall_time_max_bab) diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index 68ba5fde7b21..78ebfe39fee8 100644 --- a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -7,7 +7,7 @@ from CIME.XML.standard_module_setup import * from CIME.utils import format_time from CIME.XML.env_base import EnvBase -from CIME.utils import transform_vars, get_cime_root +from CIME.utils import transform_vars, get_cime_root, convert_to_seconds from copy import deepcopy logger = logging.getLogger(__name__) @@ -195,18 +195,31 @@ def set_job_defaults(self, batch_jobs, pesize=None, walltime=None, force_queue=N else: task_count = int(task_count) - queue = force_queue if force_queue is not None else self.select_best_queue(task_count, job) - self.set_value("JOB_QUEUE", queue, subgroup=job) - - walltime = self.get_max_walltime(queue) if walltime is None else walltime - if walltime is None: - logger.warn("Could not find a queue matching task count %d, falling back to deprecated default walltime parameter"%task_count) - #if the user names a queue which is not defined in config_batch.xml and does not set a - #walltime, fall back to the max walltime in the default queue - if force_queue: - self.get_default_queue() - walltime = self._default_walltime + if force_queue: + if not self.queue_meets_spec(force_queue, task_count, walltime=walltime, job=job): + logger.warning("User-request queue '%s' does not meet requirements for job '%s'" % (force_queue, job)) + else: + queue = self.select_best_queue(task_count, walltime=walltime, job=job) + if queue is None and walltime is not None: + # Try to see if walltime was the holdup + queue = self.select_best_queue(task_count, walltime=None, job=job) + if queue is not None: + # It was, override the walltime to avoid failure + new_walltime = self.get_queue_specs(queue)[3] + expect(new_walltime is not None, "Should never make it here") + logger.warning("Requested walltime '%s' could not be matched by any queue, using '%s' instead" % (walltime, new_walltime)) + walltime = new_walltime + + if queue is None: + logger.warning("No queue on this system met the requirements for this job. 
Falling back to defaults") + default_queue_node = self.get_default_queue() + queue = default_queue_node.text + walltime = self.get_queue_specs(queue)[3] + + walltime = self.get_queue_specs(queue)[3] if walltime is None else walltime + walltime = self._default_walltime if walltime is None else walltime # last-chance fallback + self.set_value("JOB_QUEUE", queue, subgroup=job) self.set_value("JOB_WALLCLOCK_TIME", walltime, subgroup=job) logger.debug("Job %s queue %s walltime %s" % (job, queue, walltime)) @@ -362,7 +375,6 @@ def _submit_single_job(self, case, job, depid=None, no_batch=False, batch_args=N function_name = job.replace(".", "_") if not dry_run: - function_name = job.replace(".", "_") locals()[function_name](case) return @@ -418,36 +430,64 @@ def get_job_id(self, output): jobid = search_match.group(1) return jobid - def select_best_queue(self, num_pes, job=None): + def queue_meets_spec(self, queue, num_pes, walltime=None, job=None): + jobmin, jobmax, jobname, walltimemax, strict = self.get_queue_specs(queue) + + # A job name match automatically meets spec + if job is not None and jobname is not None: + return jobname == job + + if jobmin is not None and num_pes < int(jobmin): + return False + + if jobmax is not None and num_pes > int(jobmax): + return False + + if walltime is not None and walltimemax is not None and strict: + walltime_s = convert_to_seconds(walltime) + walltimemax_s = convert_to_seconds(walltimemax) + if walltime_s > walltimemax_s: + return False + + return True + + def select_best_queue(self, num_pes, walltime=None, job=None): # Make sure to check default queue first. all_queues = [] all_queues.append( self.get_default_queue()) all_queues = all_queues + self.get_all_queues() for queue in all_queues: if queue is not None: - jobmin = queue.get("jobmin") - jobmax = queue.get("jobmax") - jobname = queue.get("jobname") - if jobname is not None: - if job == jobname: - return queue.text - # if the fullsum is between the min and max # jobs, then use this queue. - elif jobmin is not None and jobmax is not None and num_pes >= int(jobmin) and num_pes <= int(jobmax): - return queue.text + qname = queue.text + if self.queue_meets_spec(qname, num_pes, walltime=walltime, job=job): + return qname + return None - def get_max_walltime(self, queue): + def get_queue_specs(self, queue): + """ + Get queue specifications by name. 
+ + Returns (jobmin, jobmax, jobname, walltimemax, is_strict) + """ for queue_node in self.get_all_queues(): if queue_node.text == queue: - return queue_node.get("walltimemax") + jobmin = queue.get("jobmin") + jobmax = queue.get("jobmax") + jobname = queue.get("jobname") + walltimemax = queue.get("walltimemax") + strict = queue.get("strict") == "true" + + return jobmin, jobmax, jobname, walltimemax, strict + + expect(False, "Queue '%s' is unknown to this system" % queue) def get_default_queue(self): node = self.get_optional_node("queue", attributes={"default" : "true"}) if node is None: node = self.get_optional_node("queue") expect(node is not None, "No queues found") - self._default_walltime = node.get("walltimemax") - return(node) + return node def get_all_queues(self): return self.get_nodes("queue") From 1e7a85b6c1971f55b93705a0e9e719142962f631 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Wed, 19 Apr 2017 14:33:21 -0600 Subject: [PATCH 185/219] add check of command line args --- scripts/Tools/acme_check_env | 4 +--- scripts/Tools/bless_test_results | 4 +--- scripts/Tools/case.build | 4 +--- scripts/Tools/case.cmpgen_namelists | 4 +--- scripts/Tools/case.setup | 4 +--- scripts/Tools/case.submit | 4 +--- scripts/Tools/case_diff | 4 +--- scripts/Tools/check_case | 5 +---- scripts/Tools/check_input_data | 5 +---- scripts/Tools/check_lockedfiles | 4 +--- scripts/Tools/cime_bisect | 4 +--- scripts/Tools/code_checker | 4 +--- scripts/Tools/compare_namelists | 4 +--- scripts/Tools/compare_test_results | 4 +--- scripts/Tools/component_compare_baseline | 4 +--- scripts/Tools/component_compare_copy | 4 +--- scripts/Tools/component_compare_test | 4 +--- scripts/Tools/component_generate_baseline | 4 +--- scripts/Tools/getTiming | 4 ++-- scripts/Tools/jenkins_generic_job | 4 +--- scripts/Tools/list_acme_tests | 4 +--- scripts/Tools/normalize_cases | 4 +--- scripts/Tools/pelayout | 4 +--- scripts/Tools/preview_namelists | 4 +--- scripts/Tools/save_provenance | 4 +--- scripts/Tools/simple_compare | 4 +--- scripts/Tools/update_acme_tests | 4 +--- scripts/Tools/wait_for_tests | 4 +--- scripts/Tools/xmlchange | 4 +--- scripts/Tools/xmlquery | 4 +--- scripts/create_clone | 4 +--- scripts/create_newcase | 4 +--- scripts/create_test | 4 +--- scripts/lib/CIME/XML/env_mach_specific.py | 4 ++-- scripts/lib/CIME/case.py | 8 ++++---- scripts/lib/CIME/utils.py | 19 ++++++++++++++++++- scripts/manage_case | 4 +--- scripts/manage_pes | 4 ++-- scripts/query_testlists | 4 +--- scripts/tests/scripts_regression_tests.py | 2 +- 40 files changed, 63 insertions(+), 116 deletions(-) diff --git a/scripts/Tools/acme_check_env b/scripts/Tools/acme_check_env index 7abdbcf71fad..1cb1726de8cf 100755 --- a/scripts/Tools/acme_check_env +++ b/scripts/Tools/acme_check_env @@ -30,9 +30,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter CIME.utils.setup_standard_logging_options(parser) - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) ############################################################################### def check_sh(): diff --git a/scripts/Tools/bless_test_results b/scripts/Tools/bless_test_results index 4b01e4768f8c..fbbb2430e9fe 100755 --- a/scripts/Tools/bless_test_results +++ b/scripts/Tools/bless_test_results @@ -89,9 +89,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("bless_tests", nargs="*", help="When blessing, limit the bless to tests matching these regex") - args = 
parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) expect(not (args.report_only and args.force), "Makes no sense to use -r and -f simultaneously") diff --git a/scripts/Tools/case.build b/scripts/Tools/case.build index 7fba3c18d617..c9d29a9a0058 100755 --- a/scripts/Tools/case.build +++ b/scripts/Tools/case.build @@ -56,9 +56,7 @@ OR parser.add_argument("--clean-all", action="store_true", help="clean all objects including sharedlibobjects that may be used by other builds") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) cleanlist = args.clean if args.clean is None or len(args.clean) else comps diff --git a/scripts/Tools/case.cmpgen_namelists b/scripts/Tools/case.cmpgen_namelists index e874121a205c..4cd8a03878f0 100755 --- a/scripts/Tools/case.cmpgen_namelists +++ b/scripts/Tools/case.cmpgen_namelists @@ -49,9 +49,7 @@ OR help="Force generation to use baselines with this name. " "Default will be to follow the case specification") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.caseroot, args.compare, args.generate, args.compare_name, args.generate_name diff --git a/scripts/Tools/case.setup b/scripts/Tools/case.setup index 3aca10bb130d..929aa870de2e 100755 --- a/scripts/Tools/case.setup +++ b/scripts/Tools/case.setup @@ -45,9 +45,7 @@ OR parser.add_argument("-r", "--reset", action="store_true", help="Does a clean followed by setup") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.caseroot, args.clean, args.test_mode, args.reset diff --git a/scripts/Tools/case.submit b/scripts/Tools/case.submit index c307035dc43c..f65029f8b156 100755 --- a/scripts/Tools/case.submit +++ b/scripts/Tools/case.submit @@ -48,12 +48,10 @@ OR parser.add_argument("-a", "--batch-args", help="Used to pass additional arguments to batch system. 
") - args = parser.parse_args(args[1:]) + args = CIME.utils.handle_standard_logging_options(args, parser) CIME.utils.expect(args.prereq is None, "--prereq not currently supported") - CIME.utils.handle_standard_logging_options(args) - return args.caseroot, args.job, args.no_batch, args.resubmit, args.batch_args ############################################################################### diff --git a/scripts/Tools/case_diff b/scripts/Tools/case_diff index 6cd5db1b7069..2be58d909699 100755 --- a/scripts/Tools/case_diff +++ b/scripts/Tools/case_diff @@ -42,9 +42,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-b", "--show-binary", action="store_true", help="Show binary diffs") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.case1, args.case2, args.show_binary, args.skip_list diff --git a/scripts/Tools/check_case b/scripts/Tools/check_case index 2553e1a197fc..0289e458f10d 100755 --- a/scripts/Tools/check_case +++ b/scripts/Tools/check_case @@ -38,9 +38,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter CIME.utils.setup_standard_logging_options(parser) - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) ############################################################################### def _main_func(description): @@ -68,4 +66,3 @@ def _main_func(description): if __name__ == "__main__": _main_func(__doc__) - diff --git a/scripts/Tools/check_input_data b/scripts/Tools/check_input_data index 7795c063cac1..58296663c87e 100755 --- a/scripts/Tools/check_input_data +++ b/scripts/Tools/check_input_data @@ -52,9 +52,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--download", action="store_true", help="Attempt to download missing input files") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.svn_loc, args.input_data_root, args.data_list_dir, args.download @@ -78,4 +76,3 @@ def _main_func(description): if (__name__ == "__main__"): _main_func(__doc__) - diff --git a/scripts/Tools/check_lockedfiles b/scripts/Tools/check_lockedfiles index 39b68a1bd907..daf957e3e11b 100755 --- a/scripts/Tools/check_lockedfiles +++ b/scripts/Tools/check_lockedfiles @@ -29,9 +29,7 @@ OR parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to build") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.caseroot diff --git a/scripts/Tools/cime_bisect b/scripts/Tools/cime_bisect index 6e2b37e93bd8..f4a7c4323b2a 100755 --- a/scripts/Tools/cime_bisect +++ b/scripts/Tools/cime_bisect @@ -74,9 +74,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-a", "--all-commits", action="store_true", help="Test all commits, not just merges") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if (args.test_root is None): args.test_root = os.path.join(_MACHINE.get_value("CIME_OUTPUT_ROOT"), "cime_bisect") diff --git a/scripts/Tools/code_checker b/scripts/Tools/code_checker index a81908a99512..2321384bafd2 100755 --- a/scripts/Tools/code_checker +++ 
b/scripts/Tools/code_checker @@ -46,9 +46,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("files", nargs="*", help="Restrict checking to specific files. Relative name is fine.") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.num_procs, args.files diff --git a/scripts/Tools/compare_namelists b/scripts/Tools/compare_namelists index 69f86659afc2..c1ccd419b4b3 100755 --- a/scripts/Tools/compare_namelists +++ b/scripts/Tools/compare_namelists @@ -40,9 +40,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-c", "--case", action="store", dest="case", default=None, help="The case base id (..). Helps us normalize data.") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) # Normalize case if (args.case is not None): diff --git a/scripts/Tools/compare_test_results b/scripts/Tools/compare_test_results index ae3ac037b142..e4f48f010307 100755 --- a/scripts/Tools/compare_test_results +++ b/scripts/Tools/compare_test_results @@ -90,9 +90,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("compare_tests", nargs="*", help="When comparing, limit the comparison to tests matching these regex") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.baseline_name, args.baseline_root, args.test_root, args.compiler, args.test_id, args.compare_tests, args.namelists_only, args.hist_only diff --git a/scripts/Tools/component_compare_baseline b/scripts/Tools/component_compare_baseline index ea8e43343a7c..c8a0e5ccac8a 100755 --- a/scripts/Tools/component_compare_baseline +++ b/scripts/Tools/component_compare_baseline @@ -35,9 +35,7 @@ OR parser.add_argument("-b", "--baseline-dir", help="Use custom baseline dir") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.caseroot, args.baseline_dir diff --git a/scripts/Tools/component_compare_copy b/scripts/Tools/component_compare_copy index 209358ff6047..985b6f8137d3 100755 --- a/scripts/Tools/component_compare_copy +++ b/scripts/Tools/component_compare_copy @@ -36,9 +36,7 @@ OR parser.add_argument("caseroot", nargs="?", default=os.getcwd(), help="Case directory") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.suffix, args.caseroot diff --git a/scripts/Tools/component_compare_test b/scripts/Tools/component_compare_test index aae11911e49c..b186ad3b63fe 100755 --- a/scripts/Tools/component_compare_test +++ b/scripts/Tools/component_compare_test @@ -38,9 +38,7 @@ OR parser.add_argument("caseroot", nargs="?", default=os.getcwd(), help="Case directory") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.suffix1, args.suffix2, args.caseroot diff --git a/scripts/Tools/component_generate_baseline b/scripts/Tools/component_generate_baseline index 0dcca7681c8b..b8a59f7f3a41 100755 --- a/scripts/Tools/component_generate_baseline +++ b/scripts/Tools/component_generate_baseline @@ -40,9 +40,7 @@ OR "will raise an error. 
Specifying this option allows " "existing baseline directories to be silently overwritten.") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.caseroot, args.baseline_dir, args.allow_baseline_overwrite diff --git a/scripts/Tools/getTiming b/scripts/Tools/getTiming index a2ba123ec3b4..5694f7f0070f 100755 --- a/scripts/Tools/getTiming +++ b/scripts/Tools/getTiming @@ -21,8 +21,8 @@ def parse_command_line(args, description): default="999999-999999") parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to get timing for") - args = parser.parse_args(args[1:]) - CIME.utils.handle_standard_logging_options(args) + + args = CIME.utils.handle_standard_logging_options(args, parser) return args.caseroot, args.lid def __main_func(description): diff --git a/scripts/Tools/jenkins_generic_job b/scripts/Tools/jenkins_generic_job index 5e4ae54369d2..c28a03eb85c3 100755 --- a/scripts/Tools/jenkins_generic_job +++ b/scripts/Tools/jenkins_generic_job @@ -84,9 +84,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter help="Number of tasks create_test should perform simultaneously. Default " "will be min(num_cores, num_tests).") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) expect(not (args.submit_to_cdash and args.generate_baselines), "Does not make sense to use --generate-baselines and --submit-to-cdash together") diff --git a/scripts/Tools/list_acme_tests b/scripts/Tools/list_acme_tests index 915f0a380e1b..2a992a6a9ee3 100755 --- a/scripts/Tools/list_acme_tests +++ b/scripts/Tools/list_acme_tests @@ -43,9 +43,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("categories", nargs="*", help="The test categories to list. Default will list all. Test categories: %s" % (", ".join(update_acme_tests.get_test_suites()))) - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if (not args.categories): args.categories = update_acme_tests.get_test_suites() diff --git a/scripts/Tools/normalize_cases b/scripts/Tools/normalize_cases index 1eca9367d5df..fdad9ae5af34 100755 --- a/scripts/Tools/normalize_cases +++ b/scripts/Tools/normalize_cases @@ -38,9 +38,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("case2", help="Second case. 
This one will not be changed") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.case1, args.case2 diff --git a/scripts/Tools/pelayout b/scripts/Tools/pelayout index 64848d3a64e6..cd3be9bf48ed 100755 --- a/scripts/Tools/pelayout +++ b/scripts/Tools/pelayout @@ -68,9 +68,7 @@ def parse_command_line(args): parser.add_argument("-caseroot" , "--caseroot", default=os.getcwd(), help="Case directory to reference") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if (args.no_header): args.header = None diff --git a/scripts/Tools/preview_namelists b/scripts/Tools/preview_namelists index 2d3f8efcbf01..a86e662b75a9 100755 --- a/scripts/Tools/preview_namelists +++ b/scripts/Tools/preview_namelists @@ -27,9 +27,7 @@ def parse_command_line(args, description): parser.add_argument('--test', action='store_true', help="Run preview_namelist in test mode.") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args diff --git a/scripts/Tools/save_provenance b/scripts/Tools/save_provenance index 885c55a4eb6e..d12d387479b4 100755 --- a/scripts/Tools/save_provenance +++ b/scripts/Tools/save_provenance @@ -42,9 +42,7 @@ OR parser.add_argument("-l", "--lid", help="Force system to save provenance with this LID") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.mode, args.caseroot, args.lid diff --git a/scripts/Tools/simple_compare b/scripts/Tools/simple_compare index 47b465c14fa5..885067505406 100755 --- a/scripts/Tools/simple_compare +++ b/scripts/Tools/simple_compare @@ -40,9 +40,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-c", "--case", action="store", dest="case", default=None, help="The case base id (..). Helps us normalize data.") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) # Normalize case if (args.case is not None): diff --git a/scripts/Tools/update_acme_tests b/scripts/Tools/update_acme_tests index f5aeb4a0207c..7f7d24759b9c 100755 --- a/scripts/Tools/update_acme_tests +++ b/scripts/Tools/update_acme_tests @@ -48,9 +48,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-p", "--platform", help="Only add tests for a specific platform, format=machine,compiler. 
Useful for adding new platforms.") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) expect(os.path.isfile(args.test_list_path), "'%s' is not a valid file" % args.test_list_path) diff --git a/scripts/Tools/wait_for_tests b/scripts/Tools/wait_for_tests index 3e1b88d343f5..116d76a2b911 100755 --- a/scripts/Tools/wait_for_tests +++ b/scripts/Tools/wait_for_tests @@ -66,9 +66,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-g", "--cdash-build-group", default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, help="The build group to be used to display results on the CDash dashboard.") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.paths, args.no_wait, args.check_throughput, args.check_memory, args.ignore_namelist_diffs, args.ignore_memleak, args.cdash_build_name, args.cdash_project, args.cdash_build_group diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange index a8686fac4a0c..8c15b547b3d0 100755 --- a/scripts/Tools/xmlchange +++ b/scripts/Tools/xmlchange @@ -78,9 +78,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-f","--force", action="store_true", help="ignore typing checks and store value") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) listofsettings = [] if( len(args.listofsettings )): diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery index 95bb0996fc70..380dc3c90a15 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -105,9 +105,7 @@ epilog=textwrap.dedent(__doc__)) group.add_argument("--valid-values", default=False, action="store_true", help="Print the valid values associated with this variable if defined") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if (len(sys.argv) == 1) : parser.print_help() diff --git a/scripts/create_clone b/scripts/create_clone index 968848a7a425..e8ca78ed4d46 100755 --- a/scripts/create_clone +++ b/scripts/create_clone @@ -40,9 +40,7 @@ def parse_command_line(args): help="Specify the root output directory" "default: setting in case, create_clone will fail if this directory is not writable") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if args.case is None: expect(False, diff --git a/scripts/create_newcase b/scripts/create_newcase index 1524e76d958a..d33ace410ab5 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -121,9 +121,7 @@ OR parser.add_argument("-i", "--input-dir", help="Use a non-default location for input files") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if args.srcroot is not None: expect(os.path.isdir(args.srcroot), diff --git a/scripts/create_test b/scripts/create_test index 3ed88253cceb..fc491892306e 100755 --- a/scripts/create_test +++ b/scripts/create_test @@ -226,9 +226,7 @@ OR parser.add_argument("-i", "--input-dir", help="Use a non-default location for input files") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = 
CIME.utils.handle_standard_logging_options(args, parser) # generate and compare flags may not point to the same directory if model == "cesm": diff --git a/scripts/lib/CIME/XML/env_mach_specific.py b/scripts/lib/CIME/XML/env_mach_specific.py index 8e3ef188c71e..e5d80ee4bdbb 100644 --- a/scripts/lib/CIME/XML/env_mach_specific.py +++ b/scripts/lib/CIME/XML/env_mach_specific.py @@ -336,7 +336,7 @@ def get_mpirun(self, case, attribs, check_members=None, job="case.run", exe_only best_num_matched = -1 default_match = None best_num_matched_default = -1 - args = {} + args = [] for mpirun_node in mpirun_nodes: xml_attribs = mpirun_node.attrib all_match = True @@ -388,7 +388,7 @@ def get_mpirun(self, case, attribs, check_members=None, job="case.run", exe_only subgroup=job, check_members=check_members, default=arg_node.get("default")) - args[arg_node.get("name")] = arg_value + args.append(arg_value) exec_node = self.get_node("executable", root=the_match) expect(exec_node is not None,"No executable found") diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 9e143418fd55..f28f707743df 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -1129,7 +1129,7 @@ def get_mpirun_cmd(self, job="case.run"): "unit_testing" : False } - executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) + executable, mpi_arg_string = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) # special case for aprun if executable is not None and "aprun" in executable: @@ -1140,10 +1140,10 @@ def get_mpirun_cmd(self, job="case.run"): else: mpi_arg_string = " ".join(args.values()) - if self.get_value("BATCH_SYSTEM") == "cobalt": - mpi_arg_string += " : " + if self.get_value("BATCH_SYSTEM") == "cobalt": + mpi_arg_string += " : " - return "%s %s %s" % (executable if executable is not None else "", mpi_arg_string, run_suffix) + return "%s %s %s" % (executable if executable is not None else "", mpi_arg_string, run_suffix) def set_model_version(self, model): version = "unknown" diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 247b04c6f616..a4b2b6c8b922 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -637,7 +637,7 @@ def filter(self, record): #non-zero return means we log this message return 1 if record.levelno < self.max_level else 0 -def handle_standard_logging_options(args): +def handle_standard_logging_options(args, parser): """ Guide to logging in CIME. 
@@ -660,6 +660,11 @@ def handle_standard_logging_options(args): stderr_stream_handler = logging.StreamHandler(stream=sys.stderr) stderr_stream_handler.setLevel(logging.WARNING) + # scripts_regression_tests is the only thing that should pass a None argument in parser + if parser is not None: + _check_for_invalid_args(args[1:]) + args = parser.parse_args(args[1:]) + # --verbose adds to the message format but does not impact the log level if args.verbose: stdout_stream_handler.setFormatter(verbose_formatter) @@ -682,6 +687,8 @@ def handle_standard_logging_options(args): root_logger.setLevel(logging.WARN) else: root_logger.setLevel(logging.INFO) + return args + def get_logging_options(): """ @@ -1229,6 +1236,16 @@ def run_and_log_case_status(func, phase, caseroot='.'): return rv +def _check_for_invalid_args(args): + for arg in args: + if arg.startswith("--"): + continue + if arg.startswith("-") and len(arg) > 2: + expect(False, "Invalid argument %s\n Arguments should begin with -- or be single character (-s)\n Use --help for a complete list of available options"%arg) + + + + class SharedArea(object): """ Enable 0002 umask within this manager diff --git a/scripts/manage_case b/scripts/manage_case index c561d6cede48..dc03e50bac1e 100755 --- a/scripts/manage_case +++ b/scripts/manage_case @@ -140,9 +140,7 @@ def parse_command_line(args): parser.add_argument("--long", action="store_true", help="Provide long output for queries") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args diff --git a/scripts/manage_pes b/scripts/manage_pes index 6258b76e6d6c..374e33f088d8 100755 --- a/scripts/manage_pes +++ b/scripts/manage_pes @@ -98,8 +98,8 @@ def parse_command_line(args, description): help=queryhelp) parser.add_argument("-machine", "--machine", default=None, help="can be a supported machine name") - args = parser.parse_args(args[1:]) - CIME.utils.handle_standard_logging_options(args) + + args = CIME.utils.handle_standard_logging_options(args, parser) CIME.utils.expect(args.add or args.query, "Either --query or --add must be on command line") diff --git a/scripts/query_testlists b/scripts/query_testlists index 12d3fc0c3eb8..882348e44f01 100755 --- a/scripts/query_testlists +++ b/scripts/query_testlists @@ -45,9 +45,7 @@ def parse_command_line(description): parser.add_argument("--xml-testlist", help="Use this testlist to lookup tests, default specified in config_files.xml") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) expect(not(args.count and args.list_type), "Cannot specify both --count and --list arguments.") diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 7dac780e46d2..a315a7fffe47 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -2217,7 +2217,7 @@ def _main_func(): else: setattr(args, log_param, False) - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, None) write_provenance_info() From cfe46bd45b21a66cad6a71e912842badb1da5d99 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Thu, 20 Apr 2017 13:59:49 -0600 Subject: [PATCH 186/219] passing scripts_regression_tests --- scripts/lib/CIME/buildlib.py | 4 +--- scripts/lib/CIME/case.py | 4 ++-- scripts/lib/CIME/utils.py | 5 ++++- scripts/query_testlists | 2 +- 
src/build_scripts/buildlib.gptl | 4 ++-- src/build_scripts/buildlib.pio | 6 ++---- 6 files changed, 12 insertions(+), 13 deletions(-) diff --git a/scripts/lib/CIME/buildlib.py b/scripts/lib/CIME/buildlib.py index 33b4ffce9597..0ebd4e2cc46a 100644 --- a/scripts/lib/CIME/buildlib.py +++ b/scripts/lib/CIME/buildlib.py @@ -30,9 +30,7 @@ def parse_input(argv): parser.add_argument("bldroot", help="root for building library") - args = parser.parse_args() - - handle_standard_logging_options(args) + args = handle_standard_logging_options(argv, parser) return args.caseroot, args.libroot, args.bldroot diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index f28f707743df..204343f89306 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -1132,8 +1132,8 @@ def get_mpirun_cmd(self, job="case.run"): executable, mpi_arg_string = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) # special case for aprun - if executable is not None and "aprun" in executable: - aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe) + if executable is not None and "aprun" in executable: + aprun_cmd, num_nodes = get_aprun_cmd_for_case(self, run_exe) expect(num_nodes == self.num_nodes, "Not using optimized num nodes") return executable + aprun_args + " " + run_misc_suffix diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index a4b2b6c8b922..c5e094d02948 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -1241,7 +1241,10 @@ def _check_for_invalid_args(args): if arg.startswith("--"): continue if arg.startswith("-") and len(arg) > 2: - expect(False, "Invalid argument %s\n Arguments should begin with -- or be single character (-s)\n Use --help for a complete list of available options"%arg) + if arg == "-value": + logger.warn("This argument is deprecated, please use -%s"%arg) + else: + expect(False, "Invalid argument %s\n Multi-character arguments should begin with \"--\" and single character with \"-\"\n Use --help for a complete list of available options"%arg) diff --git a/scripts/query_testlists b/scripts/query_testlists index 882348e44f01..4db0cb6bbae6 100755 --- a/scripts/query_testlists +++ b/scripts/query_testlists @@ -45,7 +45,7 @@ def parse_command_line(description): parser.add_argument("--xml-testlist", help="Use this testlist to lookup tests, default specified in config_files.xml") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.handle_standard_logging_options(description, parser) expect(not(args.count and args.list_type), "Cannot specify both --count and --list arguments.") diff --git a/src/build_scripts/buildlib.gptl b/src/build_scripts/buildlib.gptl index 2a5fea6f8528..b92c788d396d 100755 --- a/src/build_scripts/buildlib.gptl +++ b/src/build_scripts/buildlib.gptl @@ -6,8 +6,8 @@ cd $CASEROOT # CASEROOT is always assumed to be an environment variable -set CIMEROOT = `./xmlquery CIMEROOT -value ` -set GMAKE = `./xmlquery GMAKE -value ` +set CIMEROOT = `./xmlquery CIMEROOT -value ` +set GMAKE = `./xmlquery GMAKE --value ` # NOTE- (mv, 2015-01-02) SHAREDPATH is an environment variable set in # the $CASE.build script diff --git a/src/build_scripts/buildlib.pio b/src/build_scripts/buildlib.pio index cb4d29e40bf7..3a37c05b54ef 100755 --- a/src/build_scripts/buildlib.pio +++ b/src/build_scripts/buildlib.pio @@ -38,9 +38,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("caseroot", nargs="?", default=os.getcwd(), help="Case directory to build") - args = 
parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) return args.buildroot, args.installpath, args.caseroot @@ -144,7 +142,7 @@ def _main_func(description): case.set_valid_values("PIO_TYPENAME",valid_values) # nothing means use the general default valid_values += ",nothing" - + for comp in case.get_values("COMP_CLASSES"): comp_pio_typename = "%s_PIO_TYPENAME"%comp case.set_valid_values(comp_pio_typename,valid_values) From 28f2d4b7a78006c33102751739769cc9deec6b93 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 21 Apr 2017 08:14:52 -0600 Subject: [PATCH 187/219] remove white space --- scripts/lib/CIME/utils.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index c5e094d02948..6725161591fc 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -1246,9 +1246,6 @@ def _check_for_invalid_args(args): else: expect(False, "Invalid argument %s\n Multi-character arguments should begin with \"--\" and single character with \"-\"\n Use --help for a complete list of available options"%arg) - - - class SharedArea(object): """ Enable 0002 umask within this manager From 42df128273e6bbf0f387d70c0163b70056a3607b Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 21 Apr 2017 09:30:56 -0600 Subject: [PATCH 188/219] update template files --- config/cesm/machines/template.case.run | 4 +--- config/cesm/machines/template.case.test | 4 +--- config/cesm/machines/template.lt_archive | 4 +--- config/cesm/machines/template.st_archive | 4 +--- 4 files changed, 4 insertions(+), 12 deletions(-) diff --git a/config/cesm/machines/template.case.run b/config/cesm/machines/template.case.run index 24162fe45951..fa89acc59b1d 100755 --- a/config/cesm/machines/template.case.run +++ b/config/cesm/machines/template.case.run @@ -52,9 +52,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/cesm/machines/template.case.test b/config/cesm/machines/template.case.test index e918de6ec21f..f508dc754bf3 100755 --- a/config/cesm/machines/template.case.test +++ b/config/cesm/machines/template.case.test @@ -46,9 +46,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/cesm/machines/template.lt_archive b/config/cesm/machines/template.lt_archive index 1dd9de1a6bc0..c44ccacb4da1 100755 --- a/config/cesm/machines/template.lt_archive +++ b/config/cesm/machines/template.lt_archive @@ -47,9 +47,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to build") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/cesm/machines/template.st_archive b/config/cesm/machines/template.st_archive index 0a1d935d5b0b..93c36d0c6f1e 100755 --- 
a/config/cesm/machines/template.st_archive +++ b/config/cesm/machines/template.st_archive @@ -47,9 +47,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to build") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + args = CIME.utils.handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) From 6027d324dd6c5df58a56cfbd93f5996afb5f2082 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 21 Apr 2017 09:45:15 -0600 Subject: [PATCH 189/219] fix mpirun string issue --- scripts/lib/CIME/case.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 204343f89306..328dd8e439b3 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -1129,7 +1129,7 @@ def get_mpirun_cmd(self, job="case.run"): "unit_testing" : False } - executable, mpi_arg_string = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) + executable, mpi_arg_list = env_mach_specific.get_mpirun(self, mpi_attribs, job=job) # special case for aprun if executable is not None and "aprun" in executable: @@ -1138,7 +1138,7 @@ def get_mpirun_cmd(self, job="case.run"): return executable + aprun_args + " " + run_misc_suffix else: - mpi_arg_string = " ".join(args.values()) + mpi_arg_string = " ".join(mpi_arg_list) if self.get_value("BATCH_SYSTEM") == "cobalt": mpi_arg_string += " : " From 887ca67b1fc79911838d3d52b166d9a9c399cfd9 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 21 Apr 2017 11:55:26 -0600 Subject: [PATCH 190/219] update run_tests.py --- scripts/fortran_unit_testing/run_tests.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/fortran_unit_testing/run_tests.py b/scripts/fortran_unit_testing/run_tests.py index 715a98f7fdec..3cffb4cd18d8 100755 --- a/scripts/fortran_unit_testing/run_tests.py +++ b/scripts/fortran_unit_testing/run_tests.py @@ -127,8 +127,8 @@ def parse_command_line(args): "--xml-test-list", help="""Path to an XML file listing directories to run tests from.""" ) - args = parser.parse_args() - CIME.utils.handle_standard_logging_options(args) + + args = CIME.utils.handle_standard_logging_options(args, parser) output = Printer(color=args.color) if args.xml_test_list is None and args.test_spec_dir is None: From b4202626d2243b416325021c3f41c7af1658c959 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 21 Apr 2017 12:31:59 -0600 Subject: [PATCH 191/219] fix query_testlist --- scripts/lib/CIME/case.py | 2 +- scripts/query_testlists | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 328dd8e439b3..8fc3d2b26a69 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -1133,7 +1133,7 @@ def get_mpirun_cmd(self, job="case.run"): # special case for aprun if executable is not None and "aprun" in executable: - aprun_cmd, num_nodes = get_aprun_cmd_for_case(self, run_exe) + aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe) expect(num_nodes == self.num_nodes, "Not using optimized num nodes") return executable + aprun_args + " " + run_misc_suffix diff --git a/scripts/query_testlists b/scripts/query_testlists index 4db0cb6bbae6..a03c50da9466 100755 --- a/scripts/query_testlists +++ b/scripts/query_testlists @@ -11,7 +11,7 @@ from CIME.utils import expect logger = logging.getLogger(__name__) 
############################################################################### -def parse_command_line(description): +def parse_command_line(args, description): ############################################################################### parser = argparse.ArgumentParser( description=description) @@ -45,7 +45,7 @@ def parse_command_line(description): parser.add_argument("--xml-testlist", help="Use this testlist to lookup tests, default specified in config_files.xml") - args = CIME.utils.handle_standard_logging_options(description, parser) + args = CIME.utils.handle_standard_logging_options(args, parser) expect(not(args.count and args.list_type), "Cannot specify both --count and --list arguments.") @@ -147,7 +147,7 @@ def list_test_data(test_data, list_type): ############################################################################### def _main_func(description): ############################################################################### - args = parse_command_line(description) + args = parse_command_line(sys.argv, description) test_data = get_tests_from_xml( xml_machine = args.xml_machine, From 971938809c5f3aaaf073cd70fb8831aeb016c43f Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Fri, 21 Apr 2017 13:19:56 -0600 Subject: [PATCH 192/219] fix buildnml sub --- scripts/lib/CIME/buildnml.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/scripts/lib/CIME/buildnml.py b/scripts/lib/CIME/buildnml.py index 34f863155b67..11445d0a0bf5 100644 --- a/scripts/lib/CIME/buildnml.py +++ b/scripts/lib/CIME/buildnml.py @@ -25,9 +25,7 @@ def parse_input(argv): parser.add_argument("caseroot", default=os.getcwd(), help="Case directory") - args = parser.parse_args() - - handle_standard_logging_options(args) + args = handle_standard_logging_options(argv, parser) return args.caseroot From d452bf18d78781efb1ac007986e084fd5a00ff15 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 24 Apr 2017 10:34:30 -0600 Subject: [PATCH 193/219] fix bug in test_scheduler, allow old handler call format --- scripts/lib/CIME/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 6725161591fc..fff888b8e201 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -637,7 +637,7 @@ def filter(self, record): #non-zero return means we log this message return 1 if record.levelno < self.max_level else 0 -def handle_standard_logging_options(args, parser): +def handle_standard_logging_options(args, parser=None): """ Guide to logging in CIME. 
@@ -1241,8 +1241,8 @@ def _check_for_invalid_args(args): if arg.startswith("--"): continue if arg.startswith("-") and len(arg) > 2: - if arg == "-value": - logger.warn("This argument is deprecated, please use -%s"%arg) + if arg == "-value" or arg == "-noecho": + logger.warn("This argument is deprecated, please use -%s"%arg) else: expect(False, "Invalid argument %s\n Multi-character arguments should begin with \"--\" and single character with \"-\"\n Use --help for a complete list of available options"%arg) From a777895c0bbf33ebf22ee104b14f694902bb461a Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sat, 22 Apr 2017 17:35:22 -0600 Subject: [PATCH 194/219] first implementation of cam aquaplanet in docn --- .../data_comps/docn/cime_config/buildnml | 5 + .../docn/cime_config/config_component.xml | 16 +- .../cime_config/namelist_definition_docn.xml | 12 +- .../data_comps/docn/docn_comp_mod.F90 | 248 +++++++++++++++++- src/share/util/shr_strdata_mod.F90 | 18 +- 5 files changed, 276 insertions(+), 23 deletions(-) diff --git a/src/components/data_comps/docn/cime_config/buildnml b/src/components/data_comps/docn/cime_config/buildnml index 2afb78283e54..64edb9943a82 100755 --- a/src/components/data_comps/docn/cime_config/buildnml +++ b/src/components/data_comps/docn/cime_config/buildnml @@ -108,6 +108,11 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): else: nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) + # For aquaplanet prescribed have no streams + if docn_mode == 'pres_aquap': + value = ['null'] + nmlgen.set_value("streams",value) + #---------------------------------------------------- # Create `shr_strdata_nml` namelist group. #---------------------------------------------------- diff --git a/src/components/data_comps/docn/cime_config/config_component.xml b/src/components/data_comps/docn/cime_config/config_component.xml index fda915c9241d..32183c29b272 100644 --- a/src/components/data_comps/docn/cime_config/config_component.xml +++ b/src/components/data_comps/docn/cime_config/config_component.xml @@ -15,7 +15,7 @@ char - prescribed,pres_aquap,som,som_aquap,copyall,interannual,null + prescribed,aquap1,aquap2,aquap3,aquap4,aquap5,aquap6,aquap7,aquap8,aquap9,aquap10,som,som_aquap,copyall,interannual,null prescribed null @@ -24,9 +24,16 @@ us20 interannual copyall - pres_aquap - som_aquap - ww3 + aquap1 + aquap2 + aquap3 + aquap4 + aquap5 + aquap6 + aquap7 + aquap8 + aquap9 + aquap10 run_component_docn env_run.xml @@ -215,6 +222,7 @@ docn slab ocean mode: docn data mode: docn interannual mode: + docn aquaplanet mode: diff --git a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml index 26ac6b980401..a682df12e4f1 100644 --- a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +++ b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml @@ -45,10 +45,10 @@ streams_file List of streams used for the given docn_mode. - prescribed - prescribed - som - som + prescribed + '' + som + som interannual @@ -245,7 +245,7 @@ char streams shr_strdata_nml - SSTDATA,SOM,IAF,NULL,COPYALL + SSTDATA,AQUAP1,SOM,IAF,NULL,COPYALL General method that operates on the data. 
This is generally implemented in the data models but is set in the strdata method for @@ -299,7 +299,7 @@ NULL SSTDATA - SSTDATA + AQUAP1 SOM SOM IAF diff --git a/src/components/data_comps/docn/docn_comp_mod.F90 b/src/components/data_comps/docn/docn_comp_mod.F90 index 6a2bf18f4a00..b1b1b3486918 100644 --- a/src/components/data_comps/docn/docn_comp_mod.F90 +++ b/src/components/data_comps/docn/docn_comp_mod.F90 @@ -81,11 +81,14 @@ module docn_comp_mod integer(IN) :: kh,kqbot type(shr_strdata_type) :: SDOCN - type(mct_rearr) :: rearr - type(mct_avect) :: avstrm ! av of data from stream - real(R8), pointer :: somtp(:) - real(R8), pointer :: tfreeze(:) - integer , pointer :: imask(:) + type(mct_rearr) :: rearr + type(mct_avect) :: avstrm ! av of data from stream + real(R8), pointer :: somtp(:) + real(R8), pointer :: tfreeze(:) + integer , pointer :: imask(:) + real(R8), pointer :: xc(:), yc(:) ! arryas of model latitudes and longitudes + integer(IN) :: aquap_option + character(len=*),parameter :: flds_strm = 'strm_h:strm_qbot' integer(IN),parameter :: ktrans = 29 @@ -144,6 +147,8 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) integer(IN) :: shrlogunit, shrloglev ! original log unit and level integer(IN) :: nunit ! unit number integer(IN) :: kmask ! field reference + integer(IN) :: klat ! field reference + integer(IN) :: klon ! field reference logical :: ocn_present ! flag logical :: ocn_prognostic ! flag logical :: ocnrof_prognostic ! flag @@ -292,12 +297,22 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) ocn_mode = trim(SDOCN%dataMode) + if (ocn_mode(1:5) == 'AQUAP') then + if (len_trim(ocn_mode) == 6) then + read(ocn_mode(6:6),'(i)') aquap_option + else if (len_trim(ocn_mode) == 7) then + read(ocn_mode(6:7),'(i)') aquap_option + end if + ocn_mode = 'AQUAP' + end if + ! check that we know how to handle the mode - if (trim(ocn_mode) == 'NULL' .or. & + if (trim(ocn_mode) == 'NULL' .or. & trim(ocn_mode) == 'SSTDATA' .or. & trim(ocn_mode) == 'COPYALL' .or. & - trim(ocn_mode) == 'IAF' .or. & + trim(ocn_mode) == 'AQUAP' .or. & + trim(ocn_mode) == 'IAF' .or. 
& trim(ocn_mode) == 'SOM') then if (my_task == master_task) & write(logunit,F00) ' ocn mode = ',trim(ocn_mode) @@ -426,10 +441,18 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) allocate(somtp(lsize)) allocate(tfreeze(lsize)) allocate(imask(lsize)) + allocate(xc(lsize)) + allocate(yc(lsize)) kmask = mct_aVect_indexRA(ggrid%data,'mask') imask(:) = nint(ggrid%data%rAttr(kmask,:)) + klon = mct_aVect_indexRA(ggrid%data,'lon') + xc(:) = ggrid%data%rAttr(klon,:) + + klat = mct_aVect_indexRA(ggrid%data,'lat') + yc(:) = ggrid%data%rAttr(klat,:) + call t_stopf('docn_initmctavs') !---------------------------------------------------------------------------- @@ -639,6 +662,8 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) call t_startf('docn_mode') + write(6,*)'DEBUG: case is ',trim(ocn_mode) + select case (trim(ocn_mode)) case('COPYALL') @@ -657,6 +682,13 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) o2x%rAttr(kswp ,n) = swp enddo + case('AQUAP') + lsize = mct_avect_lsize(o2x) + call prescribed_sst(xc, yc, lsize, aquap_option, o2x%rAttr(kt,:)) + do n = 1,lsize + o2x%rAttr(kt,n) = o2x%rAttr(kt,n) + enddo + case('IAF') lsize = mct_avect_lsize(o2x) do n = 1,lsize @@ -800,8 +832,210 @@ subroutine docn_comp_final() call t_stopf('DOCN_FINAL') end subroutine docn_comp_final + !=============================================================================== !=============================================================================== +subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) + + real(R8) , intent(in) :: rlat(:) + real(R8) , intent(in) :: rlon(:) + integer(IN) , intent(in) :: lsize + integer(IN) , intent(in) :: sst_option + real(R8) , intent(inout) :: sst(:) + + ! local + integer :: i + real(r8) :: tmp, tmp1, pi + + real(r8), parameter :: pio180 = SHR_CONST_PI/180._r8 + + ! Parameters for zonally symmetric experiments + real(r8), parameter :: t0_max = 27._r8 + real(r8), parameter :: t0_min = 0._r8 + real(r8), parameter :: maxlat = 60._r8*pio180 + real(r8), parameter :: shift = 5._r8*pio180 + real(r8), parameter :: shift9 = 10._r8*pio180 + real(r8), parameter :: shift10 = 15._r8*pio180 + + ! Parameters for zonally asymmetric experiments + real(r8), parameter :: t0_max6 = 1._r8 + real(r8), parameter :: t0_max7 = 3._r8 + real(r8), parameter :: latcen = 0._r8*pio180 + real(r8), parameter :: loncen = 0._r8*pio180 + real(r8), parameter :: latrad6 = 15._r8*pio180 + real(r8), parameter :: latrad8 = 30._r8*pio180 + real(r8), parameter :: lonrad = 30._r8*pio180 + !------------------------------------------------------------------------------- + + pi = SHR_CONST_PI + + ! Control + + if (sst_option < 1 .or. sst_option > 10) then + call shr_sys_abort ('prescribed_sst: ERROR: sst_option must be between 1 and 10') + end if + + write(6,*)'DEBUG: sst_option = ',sst_option + + if (sst_option == 1 .or. sst_option == 6 .or. sst_option == 7 .or. sst_option == 8) then + do i = 1,lsize + if (abs(rlat(i)) > maxlat) then + sst(i) = t0_min + else + tmp = sin(rlat(i)*pi*0.5_r8/maxlat) + tmp = 1._r8 - tmp*tmp + sst(i) = tmp*(t0_max - t0_min) + t0_min + end if + end do + write(6,*)'DEBUG:',i,sst(i) + end if + + ! Flat + + if (sst_option == 2) then + do i = 1,lsize + if (abs(rlat(i)) > maxlat) then + sst(i) = t0_min + else + tmp = sin(rlat(i)*pi*0.5_r8/maxlat) + tmp = 1._r8 - tmp*tmp*tmp*tmp + sst(i) = tmp*(t0_max - t0_min) + t0_min + end if + end do + end if + + ! 
Qobs + + if (sst_option == 3) then + do i = 1,lsize + if (abs(rlat(i)) > maxlat) then + sst(i) = t0_min + else + tmp = sin(rlat(i)*pi*0.5_r8/maxlat) + tmp = (2._r8 - tmp*tmp*tmp*tmp - tmp*tmp)*0.5_r8 + sst(i) = tmp*(t0_max - t0_min) + t0_min + end if + end do + end if + + ! Peaked + + if (sst_option == 4) then + do i = 1,lsize + if (abs(rlat(i)) > maxlat) then + sst(i) = t0_min + else + tmp = (maxlat - abs(rlat(i)))/maxlat + tmp1 = 1._r8 - tmp + sst(i) = t0_max*tmp + t0_min*tmp1 + end if + end do + end if + + ! Control-5N + + if (sst_option == 5) then + do i = 1,lsize + if (abs(rlat(i)) > maxlat) then + sst(i) = t0_min + else if (rlat(i) > shift) then + tmp = sin((rlat(i)-shift)*pi*0.5_r8/(maxlat-shift)) + tmp = 1._r8 - tmp*tmp + sst(i) = tmp*(t0_max - t0_min) + t0_min + else + tmp = sin((rlat(i)-shift)*pi*0.5_r8/(maxlat+shift)) + tmp = 1._r8 - tmp*tmp + sst(i) = tmp*(t0_max - t0_min) + t0_min + end if + end do + end if + + ! 1KEQ + + if (sst_option == 6) then + do i = 1,lsize + if (abs(rlat(i)-latcen) <= latrad6) then + tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad6) + tmp1 = tmp1*tmp1 + tmp = abs(rlon(i)-loncen) + tmp = min(tmp , 2._r8*pi-tmp) + if(tmp <= lonrad) then + tmp = cos(tmp*pi*0.5_r8/lonrad) + tmp = tmp*tmp + sst(i) = sst(i) + t0_max6*tmp*tmp1 + end if + end if + end do + end if + + ! 3KEQ + + if (sst_option == 7) then + do i = 1,lsize + if (abs(rlat(i)-latcen) <= latrad6) then + tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad6) + tmp1 = tmp1*tmp1 + tmp = abs(rlon(i)-loncen) + tmp = min(tmp , 2._r8*pi-tmp) + if (tmp <= lonrad) then + tmp = cos(tmp*pi*0.5_r8/lonrad) + tmp = tmp*tmp + sst(i) = sst(i) + t0_max7*tmp*tmp1 + end if + end if + end do + end if + + ! 3KW1 + + if (sst_option == 8) then + do i = 1,lsize + if (abs(rlat(i)-latcen) <= latrad8) then + tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad8) + tmp1 = tmp1*tmp1 + tmp = cos(rlon(i)-loncen) + sst(i) = sst(i) + t0_max7*tmp*tmp1 + end if + end do + end if + + ! Control-10N + + if (sst_option == 9) then + do i = 1,lsize + if (abs(rlat(i)) > maxlat) then + sst(i) = t0_min + else if (rlat(i) > shift9) then + tmp = sin((rlat(i)-shift9)*pi*0.5_r8/(maxlat-shift9)) + tmp = 1._r8 - tmp*tmp + sst(i) = tmp*(t0_max - t0_min) + t0_min + else + tmp = sin((rlat(i)-shift9)*pi*0.5_r8/(maxlat+shift9)) + tmp = 1._r8 - tmp*tmp + sst(i) = tmp*(t0_max - t0_min) + t0_min + end if + end do + end if + + ! 
Control-15N + + if (sst_option == 10) then + do i = 1,lsize + if (abs(rlat(i)) > maxlat) then + sst(i) = t0_min + else if(rlat(i) > shift10) then + tmp = sin((rlat(i)-shift10)*pi*0.5_r8/(maxlat-shift10)) + tmp = 1._r8 - tmp*tmp + sst(i) = tmp*(t0_max - t0_min) + t0_min + else + tmp = sin((rlat(i)-shift10)*pi*0.5_r8/(maxlat+shift10)) + tmp = 1._r8 - tmp*tmp + sst(i) = tmp*(t0_max - t0_min) + t0_min + end if + end do + end if + +end subroutine prescribed_sst end module docn_comp_mod diff --git a/src/share/util/shr_strdata_mod.F90 b/src/share/util/shr_strdata_mod.F90 index 01dda274f6df..323e08fb9429 100644 --- a/src/share/util/shr_strdata_mod.F90 +++ b/src/share/util/shr_strdata_mod.F90 @@ -1165,7 +1165,7 @@ subroutine shr_strdata_readnml(SDAT,file,rc,mpicom) call MPI_COMM_SIZE(mpicom,ntasks,rCode) endif -!--master--task-- + !--master--task-- if (my_task == master_task) then !---------------------------------------------------------------------------- @@ -1241,15 +1241,21 @@ subroutine shr_strdata_readnml(SDAT,file,rc,mpicom) end do do n = 1,SDAT%nstreams - call shr_stream_parseInput(SDAT%streams(n),fileName,yearAlign,yearFirst,yearLast) - call shr_stream_init(SDAT%stream(n),fileName,yearFirst,yearLast,yearAlign, & - trim(SDAT%taxMode(n))) + if (trim(SDAT%streams(n)) /= shr_strdata_nullstr) then + + ! extract fileName (stream description text file), yearAlign, yearFirst, yearLast from SDAT%streams(n) + call shr_stream_parseInput(SDAT%streams(n), fileName, yearAlign, yearFirst, yearLast) + + ! initialize stream datatype, read description text file + call shr_stream_init(SDAT%stream(n), fileName, yearFirst, yearLast, yearAlign, trim(SDAT%taxMode(n))) + + end if enddo -! call shr_strdata_print(SDAT,trim(file)//' NML_ONLY') + ! call shr_strdata_print(SDAT,trim(file)//' NML_ONLY') endif ! 
master_task -!--master--task-- + !--master--task-- if (present(mpicom)) then call shr_strdata_bcastnml(SDAT,mpicom) From 97ccc221e2814c48bce90313b8623ddaf0108cf7 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sat, 22 Apr 2017 20:44:08 -0600 Subject: [PATCH 195/219] obtained same values for sst_option = 1 --- .../data_comps/docn/cime_config/buildnml | 5 +-- .../data_comps/docn/docn_comp_mod.F90 | 34 +++++++++++-------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/src/components/data_comps/docn/cime_config/buildnml b/src/components/data_comps/docn/cime_config/buildnml index 64edb9943a82..f318fb247acb 100755 --- a/src/components/data_comps/docn/cime_config/buildnml +++ b/src/components/data_comps/docn/cime_config/buildnml @@ -9,7 +9,7 @@ # Disable these because this is our standard setup # pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position -import os, shutil, sys, glob +import os, shutil, sys, glob, re _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) @@ -109,7 +109,8 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) # For aquaplanet prescribed have no streams - if docn_mode == 'pres_aquap': + match = re.match(r'^aquap\d+',docn_mode) + if match.group(0): value = ['null'] nmlgen.set_value("streams",value) diff --git a/src/components/data_comps/docn/docn_comp_mod.F90 b/src/components/data_comps/docn/docn_comp_mod.F90 index b1b1b3486918..7d929a93f7bd 100644 --- a/src/components/data_comps/docn/docn_comp_mod.F90 +++ b/src/components/data_comps/docn/docn_comp_mod.F90 @@ -662,8 +662,6 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) call t_startf('docn_mode') - write(6,*)'DEBUG: case is ',trim(ocn_mode) - select case (trim(ocn_mode)) case('COPYALL') @@ -684,9 +682,12 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) case('AQUAP') lsize = mct_avect_lsize(o2x) + do n = 1,lsize + o2x%rAttr(kt,n) = 0.0_r8 + end do call prescribed_sst(xc, yc, lsize, aquap_option, o2x%rAttr(kt,:)) do n = 1,lsize - o2x%rAttr(kt,n) = o2x%rAttr(kt,n) + o2x%rAttr(kt,n) = o2x%rAttr(kt,n) + TkFrz enddo case('IAF') @@ -836,10 +837,10 @@ end subroutine docn_comp_final !=============================================================================== !=============================================================================== -subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) +subroutine prescribed_sst(xc, yc, lsize, sst_option, sst) - real(R8) , intent(in) :: rlat(:) - real(R8) , intent(in) :: rlon(:) + real(R8) , intent(in) :: xc(:) !degrees + real(R8) , intent(in) :: yc(:) !degrees integer(IN) , intent(in) :: lsize integer(IN) , intent(in) :: sst_option real(R8) , intent(inout) :: sst(:) @@ -847,8 +848,9 @@ subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) ! local integer :: i real(r8) :: tmp, tmp1, pi + real(r8) :: rlon(lsize), rlat(lsize) - real(r8), parameter :: pio180 = SHR_CONST_PI/180._r8 + real(r8), parameter :: pio180 = SHR_CONST_PI/180._r8 ! Parameters for zonally symmetric experiments real(r8), parameter :: t0_max = 27._r8 @@ -857,7 +859,7 @@ subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) real(r8), parameter :: shift = 5._r8*pio180 real(r8), parameter :: shift9 = 10._r8*pio180 real(r8), parameter :: shift10 = 15._r8*pio180 - + ! 
Parameters for zonally asymmetric experiments real(r8), parameter :: t0_max6 = 1._r8 real(r8), parameter :: t0_max7 = 3._r8 @@ -870,14 +872,17 @@ subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) pi = SHR_CONST_PI + ! convert xc and yc from degrees to radians + + rlon(:) = xc(:) * pio180 + rlat(:) = yc(:) * pio180 + ! Control if (sst_option < 1 .or. sst_option > 10) then call shr_sys_abort ('prescribed_sst: ERROR: sst_option must be between 1 and 10') end if - write(6,*)'DEBUG: sst_option = ',sst_option - if (sst_option == 1 .or. sst_option == 6 .or. sst_option == 7 .or. sst_option == 8) then do i = 1,lsize if (abs(rlat(i)) > maxlat) then @@ -888,7 +893,6 @@ subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) sst(i) = tmp*(t0_max - t0_min) + t0_min end if end do - write(6,*)'DEBUG:',i,sst(i) end if ! Flat @@ -972,7 +976,7 @@ subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) ! 3KEQ if (sst_option == 7) then - do i = 1,lsize + do i = 1, lsize if (abs(rlat(i)-latcen) <= latrad6) then tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad6) tmp1 = tmp1*tmp1 @@ -990,7 +994,7 @@ subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) ! 3KW1 if (sst_option == 8) then - do i = 1,lsize + do i = 1, lsize if (abs(rlat(i)-latcen) <= latrad8) then tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad8) tmp1 = tmp1*tmp1 @@ -1003,7 +1007,7 @@ subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) ! Control-10N if (sst_option == 9) then - do i = 1,lsize + do i = 1, lsize if (abs(rlat(i)) > maxlat) then sst(i) = t0_min else if (rlat(i) > shift9) then @@ -1021,7 +1025,7 @@ subroutine prescribed_sst(rlat, rlon, lsize, sst_option, sst) ! Control-15N if (sst_option == 10) then - do i = 1,lsize + do i = 1, lsize if (abs(rlat(i)) > maxlat) then sst(i) = t0_min else if(rlat(i) > shift10) then From ca8b39f86be2e8fbec3a106a197f26dcaa6ba408 Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Mon, 24 Apr 2017 21:20:13 -0600 Subject: [PATCH 196/219] changes to get input to cam same as cam aquaplanet model --- config/cesm/config_grids.xml | 46 ++++++++++++++++++- .../data_comps/docn/docn_comp_mod.F90 | 20 ++++---- 2 files changed, 53 insertions(+), 13 deletions(-) diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml index a709e2d5338f..ff0ced485484 100644 --- a/config/cesm/config_grids.xml +++ b/config/cesm/config_grids.xml @@ -176,6 +176,13 @@ usgs
+ + T42 + T42 + T42 + null + + T85 T85 @@ -368,7 +375,7 @@ gx1v6 - + 0.9x1.25 0.9x1.25 0.9x1.25 @@ -513,7 +520,7 @@ gx1v6 - + 1.9x2.5 1.9x2.5 1.9x2.5 @@ -565,6 +572,13 @@ gx1v6 + + f02 + f02 + f02 + null + + 0.23x0.31 0.23x0.31 @@ -607,6 +621,13 @@ gx3v7 + + f45 + f45 + f45 + null + + 10x15 10x15 @@ -614,6 +635,13 @@ usgs + + f10 + f10 + f10 + null + + @@ -741,6 +769,13 @@ gx1v6 + + ne30np4 + ne30np4 + ne30np4 + null + + ne30np4 ne30np4 @@ -783,6 +818,13 @@ gx1v6 + + ne120np4 + ne120np4 + ne120np4 + null + + ne120np4 ne120np4 diff --git a/src/components/data_comps/docn/docn_comp_mod.F90 b/src/components/data_comps/docn/docn_comp_mod.F90 index 7d929a93f7bd..ee86636b24c4 100644 --- a/src/components/data_comps/docn/docn_comp_mod.F90 +++ b/src/components/data_comps/docn/docn_comp_mod.F90 @@ -297,6 +297,7 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) ocn_mode = trim(SDOCN%dataMode) + ! Special logic for aquaplanet if (ocn_mode(1:5) == 'AQUAP') then if (len_trim(ocn_mode) == 6) then read(ocn_mode(6:6),'(i)') aquap_option @@ -610,16 +611,13 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) !-------------------- call t_startf('docn_unpack') - -! lsize = mct_avect_lsize(x2o) -! nflds_x2o = mct_avect_nRattr(x2o) - -! do nf=1,nflds_x2o -! do n=1,lsize -! ?? = x2o%rAttr(nf,n) -! enddo -! enddo - + ! lsize = mct_avect_lsize(x2o) + ! nflds_x2o = mct_avect_nRattr(x2o) + ! do nf=1,nflds_x2o + ! do n=1,lsize + ! ?? = x2o%rAttr(nf,n) + ! enddo + ! enddo call t_stopf('docn_unpack') !-------------------- @@ -683,7 +681,7 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) case('AQUAP') lsize = mct_avect_lsize(o2x) do n = 1,lsize - o2x%rAttr(kt,n) = 0.0_r8 + o2x%rAttr(:,n) = 0.0_r8 end do call prescribed_sst(xc, yc, lsize, aquap_option, o2x%rAttr(kt,:)) do n = 1,lsize From 9bf19d22c9e60e1a19817883cb5c2700e418bfae Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Wed, 26 Apr 2017 20:43:07 -0600 Subject: [PATCH 197/219] updates to have aquaplanet not depend on a new grid definition with a null mask --- config/cesm/config_grids.xml | 56 ----------------- .../data_comps/docn/cime_config/buildnml | 9 +-- .../docn/cime_config/config_component.xml | 47 ++++++++------ .../cime_config/namelist_definition_docn.xml | 30 +++++++-- .../data_comps/docn/docn_comp_mod.F90 | 62 ++++++++++++------- 5 files changed, 99 insertions(+), 105 deletions(-) diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml index ff0ced485484..fce725be28fc 100644 --- a/config/cesm/config_grids.xml +++ b/config/cesm/config_grids.xml @@ -176,13 +176,6 @@ usgs - - T42 - T42 - T42 - null - - T85 T85 @@ -375,13 +368,6 @@ gx1v6 - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - null - - 0.9x1.25 0.9x1.25 @@ -520,13 +506,6 @@ gx1v6 - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - null - - 1.9x2.5 1.9x2.5 @@ -572,13 +551,6 @@ gx1v6 - - f02 - f02 - f02 - null - - 0.23x0.31 0.23x0.31 @@ -621,13 +593,6 @@ gx3v7 - - f45 - f45 - f45 - null - - 10x15 10x15 @@ -635,13 +600,6 @@ usgs - - f10 - f10 - f10 - null - - @@ -769,13 +727,6 @@ gx1v6 - - ne30np4 - ne30np4 - ne30np4 - null - - ne30np4 ne30np4 @@ -818,13 +769,6 @@ gx1v6 - - ne120np4 - ne120np4 - ne120np4 - null - - ne120np4 ne120np4 diff --git a/src/components/data_comps/docn/cime_config/buildnml b/src/components/data_comps/docn/cime_config/buildnml index f318fb247acb..360c84e2db27 100755 --- a/src/components/data_comps/docn/cime_config/buildnml +++ b/src/components/data_comps/docn/cime_config/buildnml @@ -109,10 +109,11 @@ def _create_namelists(case, confdir, inst_string, infile, 
nmlgen): nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) # For aquaplanet prescribed have no streams - match = re.match(r'^aquap\d+',docn_mode) - if match.group(0): - value = ['null'] - nmlgen.set_value("streams",value) + match = re.match(r'^sst_aquap\d+',docn_mode) + if match is not None: + if match.group(0): + value = ['null'] + nmlgen.set_value("streams",value) #---------------------------------------------------- # Create `shr_strdata_nml` namelist group. diff --git a/src/components/data_comps/docn/cime_config/config_component.xml b/src/components/data_comps/docn/cime_config/config_component.xml index 32183c29b272..1b0d88186967 100644 --- a/src/components/data_comps/docn/cime_config/config_component.xml +++ b/src/components/data_comps/docn/cime_config/config_component.xml @@ -15,25 +15,25 @@ char - prescribed,aquap1,aquap2,aquap3,aquap4,aquap5,aquap6,aquap7,aquap8,aquap9,aquap10,som,som_aquap,copyall,interannual,null + prescribed,sst_aquap1,sst_aquap2,sst_aquap3,sst_aquap4,sst_aquap5,sst_aquap6,sst_aquap7,sst_aquap8,sst_aquap9,sst_aquap10,som,som_aquap,copyall,interannual,null prescribed null prescribed som - us20 + som_aquap interannual + sst_aquap1 + sst_aquap2 + sst_aquap3 + sst_aquap4 + sst_aquap5 + sst_aquap6 + sst_aquap7 + sst_aquap8 + sst_aquap9 + sst_aquap10 copyall - aquap1 - aquap2 - aquap3 - aquap4 - aquap5 - aquap6 - aquap7 - aquap8 - aquap9 - aquap10 run_component_docn env_run.xml @@ -80,7 +80,8 @@ UNSET pop_frc.1x1d.090130.nc - /glade/u/home/benedict/ys/datain/cesm2_0_beta03.som.forcing/cam4.som.forcing.aquaplanet.QzaFix_h30Fix_TspunFix.fv19_CTL.nc + default.som.forcing.aquaplanet.Qflux0_h30_sstQOBS.2degFV_c20170421.nc + default.som.forcing.aquaplanet.Qflux0_h30_sstQOBS.1degFV_c20170421.nc run_component_docn env_run.xml @@ -218,11 +219,21 @@ - docn null mode: - docn slab ocean mode: - docn data mode: - docn interannual mode: - docn aquaplanet mode: + docn null mode + docn slab ocean mode + docn aquaplanet slab ocean mode + docn interannual mode + docn aquaplanet mode: + docn prescribed aquaplanet sst - option 1 + docn prescribed aquaplanet sst - option 2 + docn prescribed aquaplanet sst - option 3 + docn prescribed aquaplanet sst - option 4 + docn prescribed aquaplanet sst - option 5 + docn prescribed aquaplanet sst - option 6 + docn prescribed aquaplanet sst - option 7 + docn prescribed aquaplanet sst - option 8 + docn prescribed aquaplanet sst - option 9 + docn prescribed aquaplanet sst - option 10 diff --git a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml index a682df12e4f1..46430d72fbff 100644 --- a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +++ b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml @@ -46,7 +46,16 @@ List of streams used for the given docn_mode. prescribed - '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' som som interannual @@ -245,7 +254,7 @@ char streams shr_strdata_nml - SSTDATA,AQUAP1,SOM,IAF,NULL,COPYALL + SSTDATA,SST_AQUAP1,SST_AQUAP2,SST_AQUAP3,SST_AQUAP4,SST_AQUAP5,SST_AQUAP6,SST_AQUAP7,SST_AQUAP8,SST_AQUAP9,SST_AQUAP10,SOM,SOM_AQUAP,IAF,NULL,COPYALL General method that operates on the data. 
This is generally implemented in the data models but is set in the strdata method for @@ -298,10 +307,19 @@ NULL - SSTDATA - AQUAP1 - SOM - SOM + SSTDATA + SST_AQUAP1 + SST_AQUAP2 + SST_AQUAP3 + SST_AQUAP4 + SST_AQUAP5 + SST_AQUAP6 + SST_AQUAP7 + SST_AQUAP8 + SST_AQUAP9 + SST_AQUAP10 + SOM + SOM_AQUAP IAF diff --git a/src/components/data_comps/docn/docn_comp_mod.F90 b/src/components/data_comps/docn/docn_comp_mod.F90 index ee86636b24c4..a648f88fbaef 100644 --- a/src/components/data_comps/docn/docn_comp_mod.F90 +++ b/src/components/data_comps/docn/docn_comp_mod.F90 @@ -147,6 +147,7 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) integer(IN) :: shrlogunit, shrloglev ! original log unit and level integer(IN) :: nunit ! unit number integer(IN) :: kmask ! field reference + integer(IN) :: kfrac ! field reference integer(IN) :: klat ! field reference integer(IN) :: klon ! field reference logical :: ocn_present ! flag @@ -297,26 +298,31 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) ocn_mode = trim(SDOCN%dataMode) - ! Special logic for aquaplanet - if (ocn_mode(1:5) == 'AQUAP') then - if (len_trim(ocn_mode) == 6) then - read(ocn_mode(6:6),'(i)') aquap_option - else if (len_trim(ocn_mode) == 7) then - read(ocn_mode(6:7),'(i)') aquap_option + ! Special logic for prescribed aquaplanet + if (ocn_mode(1:9) == 'SST_AQUAP') then + ! First determine the prescribed aquaplanet option + if (len_trim(ocn_mode) == 10) then + read(ocn_mode(10:10),'(i)') aquap_option + else if (len_trim(ocn_mode) == 11) then + read(ocn_mode(10:11),'(i)') aquap_option end if - ocn_mode = 'AQUAP' + ! Now remove the index from the ocn_mode value, to have a generic setting + ! for use below + ocn_mode = "SST_AQUAP" end if ! check that we know how to handle the mode - if (trim(ocn_mode) == 'NULL' .or. & - trim(ocn_mode) == 'SSTDATA' .or. & - trim(ocn_mode) == 'COPYALL' .or. & - trim(ocn_mode) == 'AQUAP' .or. & - trim(ocn_mode) == 'IAF' .or. & - trim(ocn_mode) == 'SOM') then - if (my_task == master_task) & + if (trim(ocn_mode) == 'NULL' .or. & + trim(ocn_mode) == 'SSTDATA' .or. & + trim(ocn_mode) == 'SST_AQUAP' .or. & + trim(ocn_mode) == 'COPYALL' .or. & + trim(ocn_mode) == 'IAF' .or. & + trim(ocn_mode) == 'SOM' .or. & + trim(ocn_mode) == 'SOM_AQUAP') then + if (my_task == master_task) then write(logunit,F00) ' ocn mode = ',trim(ocn_mode) + end if else write(logunit,F00) ' ERROR illegal ocn mode = ',trim(ocn_mode) call shr_sys_abort() @@ -369,11 +375,11 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) !---------------------------------------------------------------------------- call seq_infodata_PutData(infodata, ocnrof_prognostic=ocnrof_prognostic, & - ocn_present=ocn_present, ocn_prognostic=ocn_prognostic, & - ocn_nx=SDOCN%nxg, ocn_ny=SDOCN%nyg ) + ocn_present=ocn_present, ocn_prognostic=ocn_prognostic, & + ocn_nx=SDOCN%nxg, ocn_ny=SDOCN%nyg ) !---------------------------------------------------------------------------- - ! Initialize MCT global seg map, 1d decomp + ! Initialize data model MCT global seg map, 1d decomp !---------------------------------------------------------------------------- call t_startf('docn_initgsmaps') @@ -390,14 +396,26 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) call t_stopf('docn_initgsmaps') !---------------------------------------------------------------------------- - ! Initialize MCT domain + ! 
Initialize data model MCT domain !---------------------------------------------------------------------------- call t_startf('docn_initmctdom') if (my_task == master_task) write(logunit,F00) 'copy domains' call shr_sys_flush(logunit) - if (ocn_present) call shr_dmodel_rearrGGrid(SDOCN%grid, ggrid, gsmap, rearr, mpicom) + if (ocn_present) then + call shr_dmodel_rearrGGrid(SDOCN%grid, ggrid, gsmap, rearr, mpicom) + end if + + ! Special logic for either prescribed or som aquaplanet - overwrite and + ! set mask/frac to 1 + if (ocn_mode == 'SST_AQUAP' .or. ocn_mode == 'SOM_AQUAP') then + kmask = mct_aVect_indexRA(ggrid%data,'mask') + ggrid%data%rattr(kmask,:) = 1 + kfrac = mct_aVect_indexRA(ggrid%data,'frac') + ggrid%data%rattr(kfrac,:) = 1.0_r8 + write(logunit,F00) ' Resetting the data ocean mask and frac to 1 for aquaplanet' + end if call t_stopf('docn_initmctdom') @@ -678,7 +696,7 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) o2x%rAttr(kswp ,n) = swp enddo - case('AQUAP') + case('SST_AQUAP') lsize = mct_avect_lsize(o2x) do n = 1,lsize o2x%rAttr(:,n) = 0.0_r8 @@ -701,7 +719,7 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) o2x%rAttr(kswp ,n) = swp enddo - case('SOM') + case('SOM','SOM_AQUAP') lsize = mct_avect_lsize(o2x) do n = 1,SDOCN%nstreams call shr_dmodel_translateAV(SDOCN%avs(n),avstrm,avifld,avofld,rearr) @@ -875,6 +893,8 @@ subroutine prescribed_sst(xc, yc, lsize, sst_option, sst) rlon(:) = xc(:) * pio180 rlat(:) = yc(:) * pio180 + write(6,*)"DEBUG: sst_option is ",sst_option + ! Control if (sst_option < 1 .or. sst_option > 10) then From 8f258474f664af6254097f8bde1076692617c30b Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sat, 29 Apr 2017 14:28:34 -0600 Subject: [PATCH 198/219] fixed comment on new aquaplanet mode --- .../data_comps/docn/cime_config/namelist_definition_docn.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml index 46430d72fbff..9f75abca4939 100644 --- a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +++ b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml @@ -301,6 +301,8 @@ If DOCN_MODE is prescribed, datamode will be set to SSTDATA If DOCN_MODE is interannual, datamode will be set to IAF If DOCN_MODE is som , datamode will be set to SOM + If DOCN_MODE is sst_aqup[n], datamode will be set to SST_AQUAP + If DOCN_MODE is som_aqup[n], datamode will be set to SOM_AQUAP If DOCN_MODE is null, datamode will be set to NULL default: SSTDATA (prescribed setting for DOCN_MODE)' From 3461b95f690cef0500f8b916152107f6afb399ed Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Sat, 29 Apr 2017 14:32:22 -0600 Subject: [PATCH 199/219] updated config_grids.xml to be the same as master --- config/cesm/config_grids.xml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml index fce725be28fc..a709e2d5338f 100644 --- a/config/cesm/config_grids.xml +++ b/config/cesm/config_grids.xml @@ -368,6 +368,13 @@ gx1v6 + + 0.9x1.25 + 0.9x1.25 + 0.9x1.25 + null + + 0.9x1.25 0.9x1.25 @@ -506,6 +513,13 @@ gx1v6 + + 1.9x2.5 + 1.9x2.5 + 1.9x2.5 + null + + 1.9x2.5 1.9x2.5 From efd9f5fd6807581b3e7f9d85d735bd8c883d5044 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 1 May 2017 15:57:11 -0600 Subject: [PATCH 200/219] rename function --- config/acme/machines/template.case.run | 4 +--- 
config/acme/machines/template.case.test | 4 +--- config/acme/machines/template.lt_archive | 4 +--- config/acme/machines/template.st_archive | 4 +--- config/cesm/machines/template.case.run | 2 +- config/cesm/machines/template.case.test | 2 +- config/cesm/machines/template.lt_archive | 2 +- config/cesm/machines/template.st_archive | 2 +- scripts/Tools/acme_check_env | 2 +- scripts/Tools/bless_test_results | 2 +- scripts/Tools/case.build | 2 +- scripts/Tools/case.cmpgen_namelists | 2 +- scripts/Tools/case.setup | 2 +- scripts/Tools/case.submit | 2 +- scripts/Tools/case_diff | 2 +- scripts/Tools/check_case | 2 +- scripts/Tools/check_input_data | 2 +- scripts/Tools/check_lockedfiles | 2 +- scripts/Tools/cime_bisect | 2 +- scripts/Tools/code_checker | 2 +- scripts/Tools/compare_namelists | 2 +- scripts/Tools/compare_test_results | 2 +- scripts/Tools/component_compare_baseline | 2 +- scripts/Tools/component_compare_copy | 2 +- scripts/Tools/component_compare_test | 2 +- scripts/Tools/component_generate_baseline | 2 +- scripts/Tools/getTiming | 2 +- scripts/Tools/jenkins_generic_job | 2 +- scripts/Tools/list_acme_tests | 2 +- scripts/Tools/normalize_cases | 2 +- scripts/Tools/pelayout | 2 +- scripts/Tools/preview_namelists | 2 +- scripts/Tools/save_provenance | 2 +- scripts/Tools/simple_compare | 2 +- scripts/Tools/testreporter.py | 2 +- scripts/Tools/update_acme_tests | 2 +- scripts/Tools/wait_for_tests | 2 +- scripts/Tools/xmlchange | 2 +- scripts/Tools/xmlconvertors/config_pes_converter.py | 8 +++----- scripts/Tools/xmlconvertors/grid_xml_converter.py | 4 +--- scripts/Tools/xmlquery | 2 +- scripts/create_clone | 2 +- scripts/create_newcase | 2 +- scripts/create_test | 2 +- scripts/fortran_unit_testing/run_tests.py | 2 +- scripts/lib/CIME/utils.py | 2 +- scripts/manage_case | 2 +- scripts/manage_pes | 2 +- scripts/query_testlists | 2 +- scripts/tests/scripts_regression_tests.py | 2 +- src/build_scripts/buildlib.pio | 2 +- tools/configure | 2 +- 52 files changed, 54 insertions(+), 66 deletions(-) diff --git a/config/acme/machines/template.case.run b/config/acme/machines/template.case.run index ea81a1585279..d85c3e15dce4 100755 --- a/config/acme/machines/template.case.run +++ b/config/acme/machines/template.case.run @@ -55,9 +55,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + CIME.utils.parse_args_and.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/acme/machines/template.case.test b/config/acme/machines/template.case.test index 1c2cf6e49b05..81ccab9cbd15 100755 --- a/config/acme/machines/template.case.test +++ b/config/acme/machines/template.case.test @@ -50,9 +50,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/acme/machines/template.lt_archive b/config/acme/machines/template.lt_archive index 421cca9325af..0feb5f9d3777 100755 --- a/config/acme/machines/template.lt_archive +++ b/config/acme/machines/template.lt_archive @@ -49,9 +49,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", default=os.getcwd(), 
help="Case directory to build") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/acme/machines/template.st_archive b/config/acme/machines/template.st_archive index 68f008ab3f46..5f830a1e2583 100755 --- a/config/acme/machines/template.st_archive +++ b/config/acme/machines/template.st_archive @@ -48,9 +48,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to build") - args = parser.parse_args() - - CIME.utils.handle_standard_logging_options(args) + CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/cesm/machines/template.case.run b/config/cesm/machines/template.case.run index fa89acc59b1d..7f9d31fc5f59 100755 --- a/config/cesm/machines/template.case.run +++ b/config/cesm/machines/template.case.run @@ -52,7 +52,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/cesm/machines/template.case.test b/config/cesm/machines/template.case.test index f508dc754bf3..13dbb65a4ce7 100755 --- a/config/cesm/machines/template.case.test +++ b/config/cesm/machines/template.case.test @@ -46,7 +46,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/cesm/machines/template.lt_archive b/config/cesm/machines/template.lt_archive index c44ccacb4da1..9f51cfa886fe 100755 --- a/config/cesm/machines/template.lt_archive +++ b/config/cesm/machines/template.lt_archive @@ -47,7 +47,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to build") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/config/cesm/machines/template.st_archive b/config/cesm/machines/template.st_archive index 93c36d0c6f1e..ec2bc02de34d 100755 --- a/config/cesm/machines/template.st_archive +++ b/config/cesm/machines/template.st_archive @@ -47,7 +47,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to build") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/scripts/Tools/acme_check_env b/scripts/Tools/acme_check_env index 1cb1726de8cf..e6edf90576c4 100755 --- a/scripts/Tools/acme_check_env +++ b/scripts/Tools/acme_check_env @@ -30,7 +30,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter CIME.utils.setup_standard_logging_options(parser) - args = CIME.utils.handle_standard_logging_options(args, parser) + args = 
CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) ############################################################################### def check_sh(): diff --git a/scripts/Tools/bless_test_results b/scripts/Tools/bless_test_results index fbbb2430e9fe..ceca60cf51f2 100755 --- a/scripts/Tools/bless_test_results +++ b/scripts/Tools/bless_test_results @@ -89,7 +89,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("bless_tests", nargs="*", help="When blessing, limit the bless to tests matching these regex") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) expect(not (args.report_only and args.force), "Makes no sense to use -r and -f simultaneously") diff --git a/scripts/Tools/case.build b/scripts/Tools/case.build index c9d29a9a0058..8caee2ee579a 100755 --- a/scripts/Tools/case.build +++ b/scripts/Tools/case.build @@ -56,7 +56,7 @@ OR parser.add_argument("--clean-all", action="store_true", help="clean all objects including sharedlibobjects that may be used by other builds") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) cleanlist = args.clean if args.clean is None or len(args.clean) else comps diff --git a/scripts/Tools/case.cmpgen_namelists b/scripts/Tools/case.cmpgen_namelists index 4cd8a03878f0..999ba075a45c 100755 --- a/scripts/Tools/case.cmpgen_namelists +++ b/scripts/Tools/case.cmpgen_namelists @@ -49,7 +49,7 @@ OR help="Force generation to use baselines with this name. " "Default will be to follow the case specification") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.compare, args.generate, args.compare_name, args.generate_name diff --git a/scripts/Tools/case.setup b/scripts/Tools/case.setup index 929aa870de2e..7fb4eaae6daf 100755 --- a/scripts/Tools/case.setup +++ b/scripts/Tools/case.setup @@ -45,7 +45,7 @@ OR parser.add_argument("-r", "--reset", action="store_true", help="Does a clean followed by setup") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.clean, args.test_mode, args.reset diff --git a/scripts/Tools/case.submit b/scripts/Tools/case.submit index f65029f8b156..3ea9d21b2874 100755 --- a/scripts/Tools/case.submit +++ b/scripts/Tools/case.submit @@ -48,7 +48,7 @@ OR parser.add_argument("-a", "--batch-args", help="Used to pass additional arguments to batch system. 
") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) CIME.utils.expect(args.prereq is None, "--prereq not currently supported") diff --git a/scripts/Tools/case_diff b/scripts/Tools/case_diff index 2be58d909699..4ad7da8da800 100755 --- a/scripts/Tools/case_diff +++ b/scripts/Tools/case_diff @@ -42,7 +42,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-b", "--show-binary", action="store_true", help="Show binary diffs") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.case1, args.case2, args.show_binary, args.skip_list diff --git a/scripts/Tools/check_case b/scripts/Tools/check_case index 0289e458f10d..0c360e41e879 100755 --- a/scripts/Tools/check_case +++ b/scripts/Tools/check_case @@ -38,7 +38,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter CIME.utils.setup_standard_logging_options(parser) - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) ############################################################################### def _main_func(description): diff --git a/scripts/Tools/check_input_data b/scripts/Tools/check_input_data index 58296663c87e..dc1277204909 100755 --- a/scripts/Tools/check_input_data +++ b/scripts/Tools/check_input_data @@ -52,7 +52,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--download", action="store_true", help="Attempt to download missing input files") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.svn_loc, args.input_data_root, args.data_list_dir, args.download diff --git a/scripts/Tools/check_lockedfiles b/scripts/Tools/check_lockedfiles index daf957e3e11b..46f509e4721d 100755 --- a/scripts/Tools/check_lockedfiles +++ b/scripts/Tools/check_lockedfiles @@ -29,7 +29,7 @@ OR parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to build") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot diff --git a/scripts/Tools/cime_bisect b/scripts/Tools/cime_bisect index f4a7c4323b2a..53d1d8304129 100755 --- a/scripts/Tools/cime_bisect +++ b/scripts/Tools/cime_bisect @@ -74,7 +74,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-a", "--all-commits", action="store_true", help="Test all commits, not just merges") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if (args.test_root is None): args.test_root = os.path.join(_MACHINE.get_value("CIME_OUTPUT_ROOT"), "cime_bisect") diff --git a/scripts/Tools/code_checker b/scripts/Tools/code_checker index 2321384bafd2..79191d2d2bfd 100755 --- a/scripts/Tools/code_checker +++ b/scripts/Tools/code_checker @@ -46,7 +46,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("files", nargs="*", help="Restrict checking to specific files. 
Relative name is fine.") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.num_procs, args.files diff --git a/scripts/Tools/compare_namelists b/scripts/Tools/compare_namelists index c1ccd419b4b3..fcfbc481a2c3 100755 --- a/scripts/Tools/compare_namelists +++ b/scripts/Tools/compare_namelists @@ -40,7 +40,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-c", "--case", action="store", dest="case", default=None, help="The case base id (..). Helps us normalize data.") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) # Normalize case if (args.case is not None): diff --git a/scripts/Tools/compare_test_results b/scripts/Tools/compare_test_results index e4f48f010307..7307c74e0c8f 100755 --- a/scripts/Tools/compare_test_results +++ b/scripts/Tools/compare_test_results @@ -90,7 +90,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("compare_tests", nargs="*", help="When comparing, limit the comparison to tests matching these regex") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.baseline_name, args.baseline_root, args.test_root, args.compiler, args.test_id, args.compare_tests, args.namelists_only, args.hist_only diff --git a/scripts/Tools/component_compare_baseline b/scripts/Tools/component_compare_baseline index c8a0e5ccac8a..a809e12acfe3 100755 --- a/scripts/Tools/component_compare_baseline +++ b/scripts/Tools/component_compare_baseline @@ -35,7 +35,7 @@ OR parser.add_argument("-b", "--baseline-dir", help="Use custom baseline dir") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.baseline_dir diff --git a/scripts/Tools/component_compare_copy b/scripts/Tools/component_compare_copy index 985b6f8137d3..db3574cad57b 100755 --- a/scripts/Tools/component_compare_copy +++ b/scripts/Tools/component_compare_copy @@ -36,7 +36,7 @@ OR parser.add_argument("caseroot", nargs="?", default=os.getcwd(), help="Case directory") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.suffix, args.caseroot diff --git a/scripts/Tools/component_compare_test b/scripts/Tools/component_compare_test index b186ad3b63fe..07e6c17b199e 100755 --- a/scripts/Tools/component_compare_test +++ b/scripts/Tools/component_compare_test @@ -38,7 +38,7 @@ OR parser.add_argument("caseroot", nargs="?", default=os.getcwd(), help="Case directory") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.suffix1, args.suffix2, args.caseroot diff --git a/scripts/Tools/component_generate_baseline b/scripts/Tools/component_generate_baseline index b8a59f7f3a41..311537438dfa 100755 --- a/scripts/Tools/component_generate_baseline +++ b/scripts/Tools/component_generate_baseline @@ -40,7 +40,7 @@ OR "will raise an error. 
Specifying this option allows " "existing baseline directories to be silently overwritten.") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.baseline_dir, args.allow_baseline_overwrite diff --git a/scripts/Tools/getTiming b/scripts/Tools/getTiming index 5694f7f0070f..22878582ee51 100755 --- a/scripts/Tools/getTiming +++ b/scripts/Tools/getTiming @@ -22,7 +22,7 @@ def parse_command_line(args, description): parser.add_argument("--caseroot", default=os.getcwd(), help="Case directory to get timing for") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.lid def __main_func(description): diff --git a/scripts/Tools/jenkins_generic_job b/scripts/Tools/jenkins_generic_job index c28a03eb85c3..a31ff82fb8eb 100755 --- a/scripts/Tools/jenkins_generic_job +++ b/scripts/Tools/jenkins_generic_job @@ -84,7 +84,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter help="Number of tasks create_test should perform simultaneously. Default " "will be min(num_cores, num_tests).") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) expect(not (args.submit_to_cdash and args.generate_baselines), "Does not make sense to use --generate-baselines and --submit-to-cdash together") diff --git a/scripts/Tools/list_acme_tests b/scripts/Tools/list_acme_tests index 2a992a6a9ee3..c7f2cefba512 100755 --- a/scripts/Tools/list_acme_tests +++ b/scripts/Tools/list_acme_tests @@ -43,7 +43,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("categories", nargs="*", help="The test categories to list. Default will list all. Test categories: %s" % (", ".join(update_acme_tests.get_test_suites()))) - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if (not args.categories): args.categories = update_acme_tests.get_test_suites() diff --git a/scripts/Tools/normalize_cases b/scripts/Tools/normalize_cases index fdad9ae5af34..693c4861e67d 100755 --- a/scripts/Tools/normalize_cases +++ b/scripts/Tools/normalize_cases @@ -38,7 +38,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("case2", help="Second case. 
This one will not be changed") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.case1, args.case2 diff --git a/scripts/Tools/pelayout b/scripts/Tools/pelayout index cd3be9bf48ed..50736a528322 100755 --- a/scripts/Tools/pelayout +++ b/scripts/Tools/pelayout @@ -68,7 +68,7 @@ def parse_command_line(args): parser.add_argument("-caseroot" , "--caseroot", default=os.getcwd(), help="Case directory to reference") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if (args.no_header): args.header = None diff --git a/scripts/Tools/preview_namelists b/scripts/Tools/preview_namelists index a86e662b75a9..4bece0ee573b 100755 --- a/scripts/Tools/preview_namelists +++ b/scripts/Tools/preview_namelists @@ -27,7 +27,7 @@ def parse_command_line(args, description): parser.add_argument('--test', action='store_true', help="Run preview_namelist in test mode.") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args diff --git a/scripts/Tools/save_provenance b/scripts/Tools/save_provenance index d12d387479b4..fa3ae37b869d 100755 --- a/scripts/Tools/save_provenance +++ b/scripts/Tools/save_provenance @@ -42,7 +42,7 @@ OR parser.add_argument("-l", "--lid", help="Force system to save provenance with this LID") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.mode, args.caseroot, args.lid diff --git a/scripts/Tools/simple_compare b/scripts/Tools/simple_compare index 885067505406..a72ebe398bc1 100755 --- a/scripts/Tools/simple_compare +++ b/scripts/Tools/simple_compare @@ -40,7 +40,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-c", "--case", action="store", dest="case", default=None, help="The case base id (..). Helps us normalize data.") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) # Normalize case if (args.case is not None): diff --git a/scripts/Tools/testreporter.py b/scripts/Tools/testreporter.py index 4ea6666dc244..bc5188ab41c8 100755 --- a/scripts/Tools/testreporter.py +++ b/scripts/Tools/testreporter.py @@ -43,7 +43,7 @@ def parse_command_line(args): parser.add_argument("--dumpxml",action="store_true", help="Dump XML test results to sceen.") args = parser.parse_args() - CIME.utils.handle_standard_logging_options(args) + CIME.utils.parse_args_and_handle_standard_logging_options(args) return args.testroot, args.testid, args.tagname, args.testtype, args.dryrun, args.dumpxml diff --git a/scripts/Tools/update_acme_tests b/scripts/Tools/update_acme_tests index 7f7d24759b9c..588c51321604 100755 --- a/scripts/Tools/update_acme_tests +++ b/scripts/Tools/update_acme_tests @@ -48,7 +48,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-p", "--platform", help="Only add tests for a specific platform, format=machine,compiler. 
Useful for adding new platforms.") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) expect(os.path.isfile(args.test_list_path), "'%s' is not a valid file" % args.test_list_path) diff --git a/scripts/Tools/wait_for_tests b/scripts/Tools/wait_for_tests index 116d76a2b911..7b4e698f3fa0 100755 --- a/scripts/Tools/wait_for_tests +++ b/scripts/Tools/wait_for_tests @@ -66,7 +66,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-g", "--cdash-build-group", default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, help="The build group to be used to display results on the CDash dashboard.") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.paths, args.no_wait, args.check_throughput, args.check_memory, args.ignore_namelist_diffs, args.ignore_memleak, args.cdash_build_name, args.cdash_project, args.cdash_build_group diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange index 8c15b547b3d0..f32bf556c0d8 100755 --- a/scripts/Tools/xmlchange +++ b/scripts/Tools/xmlchange @@ -78,7 +78,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-f","--force", action="store_true", help="ignore typing checks and store value") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) listofsettings = [] if( len(args.listofsettings )): diff --git a/scripts/Tools/xmlconvertors/config_pes_converter.py b/scripts/Tools/xmlconvertors/config_pes_converter.py index a0d0746c2275..c453ee4c31b7 100755 --- a/scripts/Tools/xmlconvertors/config_pes_converter.py +++ b/scripts/Tools/xmlconvertors/config_pes_converter.py @@ -28,9 +28,7 @@ def parse_command_line(args): parser.add_argument("-cime2file", "--cime2file", help="location of config_grid.xml file in CIME2 repository") parser.add_argument("-cime5file", "--cime5file", help="location of config_grids.xml file in CIME5 repository") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.cime2file is None or args.cime5file is None: parser.print_help() @@ -187,8 +185,8 @@ def __init__(self, xmlfilename): tempxml = StringIO.StringIO(t3) super(PesTree, self).__init__(tempxml) tempxml.close() - - + + def populate(self): xmlnodes = self.root.findall('grid') nodeclass = Cime5PesNode diff --git a/scripts/Tools/xmlconvertors/grid_xml_converter.py b/scripts/Tools/xmlconvertors/grid_xml_converter.py index 28fb95f49b50..988727a3edcc 100755 --- a/scripts/Tools/xmlconvertors/grid_xml_converter.py +++ b/scripts/Tools/xmlconvertors/grid_xml_converter.py @@ -33,9 +33,7 @@ def parse_command_line(args): parser.add_argument("-cime2file", "--cime2file", help="location of config_grid.xml file in CIME2 repository") parser.add_argument("-cime5file", "--cime5file", help="location of config_grids.xml file in CIME5 repository") - args = parser.parse_args(args[1:]) - - CIME.utils.handle_standard_logging_options(args) + CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.cime2file is None or args.cime5file is None: parser.print_help() diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery index 380dc3c90a15..26b6b23d788b 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -105,7 +105,7 @@ 
epilog=textwrap.dedent(__doc__)) group.add_argument("--valid-values", default=False, action="store_true", help="Print the valid values associated with this variable if defined") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if (len(sys.argv) == 1) : parser.print_help() diff --git a/scripts/create_clone b/scripts/create_clone index e8ca78ed4d46..74c7bedfee5d 100755 --- a/scripts/create_clone +++ b/scripts/create_clone @@ -40,7 +40,7 @@ def parse_command_line(args): help="Specify the root output directory" "default: setting in case, create_clone will fail if this directory is not writable") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.case is None: expect(False, diff --git a/scripts/create_newcase b/scripts/create_newcase index d33ace410ab5..6d0f79cfcacf 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -121,7 +121,7 @@ OR parser.add_argument("-i", "--input-dir", help="Use a non-default location for input files") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.srcroot is not None: expect(os.path.isdir(args.srcroot), diff --git a/scripts/create_test b/scripts/create_test index fc491892306e..fb8ce394b442 100755 --- a/scripts/create_test +++ b/scripts/create_test @@ -226,7 +226,7 @@ OR parser.add_argument("-i", "--input-dir", help="Use a non-default location for input files") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) # generate and compare flags may not point to the same directory if model == "cesm": diff --git a/scripts/fortran_unit_testing/run_tests.py b/scripts/fortran_unit_testing/run_tests.py index 3cffb4cd18d8..9ce19b2d4535 100755 --- a/scripts/fortran_unit_testing/run_tests.py +++ b/scripts/fortran_unit_testing/run_tests.py @@ -128,7 +128,7 @@ def parse_command_line(args): help="""Path to an XML file listing directories to run tests from.""" ) - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) output = Printer(color=args.color) if args.xml_test_list is None and args.test_spec_dir is None: diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index fff888b8e201..6d79bce05438 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -637,7 +637,7 @@ def filter(self, record): #non-zero return means we log this message return 1 if record.levelno < self.max_level else 0 -def handle_standard_logging_options(args, parser=None): +def parse_args_and_handle_standard_logging_options(args, parser=None): """ Guide to logging in CIME. 
diff --git a/scripts/manage_case b/scripts/manage_case index dc03e50bac1e..40f6a8fc280e 100755 --- a/scripts/manage_case +++ b/scripts/manage_case @@ -140,7 +140,7 @@ def parse_command_line(args): parser.add_argument("--long", action="store_true", help="Provide long output for queries") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args diff --git a/scripts/manage_pes b/scripts/manage_pes index 374e33f088d8..9f0f86e52cfa 100755 --- a/scripts/manage_pes +++ b/scripts/manage_pes @@ -99,7 +99,7 @@ def parse_command_line(args, description): parser.add_argument("-machine", "--machine", default=None, help="can be a supported machine name") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) CIME.utils.expect(args.add or args.query, "Either --query or --add must be on command line") diff --git a/scripts/query_testlists b/scripts/query_testlists index a03c50da9466..d9c5f3a31d3d 100755 --- a/scripts/query_testlists +++ b/scripts/query_testlists @@ -45,7 +45,7 @@ def parse_command_line(args, description): parser.add_argument("--xml-testlist", help="Use this testlist to lookup tests, default specified in config_files.xml") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) expect(not(args.count and args.list_type), "Cannot specify both --count and --list arguments.") diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index a315a7fffe47..380b0f19fdf0 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -2217,7 +2217,7 @@ def _main_func(): else: setattr(args, log_param, False) - args = CIME.utils.handle_standard_logging_options(args, None) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, None) write_provenance_info() diff --git a/src/build_scripts/buildlib.pio b/src/build_scripts/buildlib.pio index 3a37c05b54ef..5b6909b0ddfb 100755 --- a/src/build_scripts/buildlib.pio +++ b/src/build_scripts/buildlib.pio @@ -38,7 +38,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("caseroot", nargs="?", default=os.getcwd(), help="Case directory to build") - args = CIME.utils.handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.buildroot, args.installpath, args.caseroot diff --git a/tools/configure b/tools/configure index bba14b82d700..997b1d3e524d 100755 --- a/tools/configure +++ b/tools/configure @@ -69,7 +69,7 @@ def parse_command_line(args): argcnt = len(args) args = parser.parse_args() - CIME.utils.handle_standard_logging_options(args) + CIME.utils.parse_args_and_handle_standard_logging_options(args) opts = {} if args.machines_dir is not None: From b55228190f9cb488aa3cc882311218f8d9a44caa Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 1 May 2017 16:06:19 -0600 Subject: [PATCH 201/219] fix pylint issues --- scripts/lib/CIME/buildlib.py | 4 ++-- scripts/lib/CIME/buildnml.py | 4 ++-- scripts/lib/CIME/case_setup.py | 1 - 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/scripts/lib/CIME/buildlib.py b/scripts/lib/CIME/buildlib.py index 0ebd4e2cc46a..cc775e7bbbf1 100644 --- a/scripts/lib/CIME/buildlib.py +++ b/scripts/lib/CIME/buildlib.py @@ -3,7 +3,7 @@ """ from 
CIME.XML.standard_module_setup import * -from CIME.utils import handle_standard_logging_options, setup_standard_logging_options +from CIME.utils import parse_args_and_handle_standard_logging_options, setup_standard_logging_options from CIME.case import Case import sys, os, argparse, doctest @@ -30,7 +30,7 @@ def parse_input(argv): parser.add_argument("bldroot", help="root for building library") - args = handle_standard_logging_options(argv, parser) + args = parse_args_and_handle_standard_logging_options(argv, parser) return args.caseroot, args.libroot, args.bldroot diff --git a/scripts/lib/CIME/buildnml.py b/scripts/lib/CIME/buildnml.py index 11445d0a0bf5..0fa84fa578ee 100644 --- a/scripts/lib/CIME/buildnml.py +++ b/scripts/lib/CIME/buildnml.py @@ -5,7 +5,7 @@ """ from CIME.XML.standard_module_setup import * -from CIME.utils import expect, handle_standard_logging_options, setup_standard_logging_options +from CIME.utils import expect, parse_args_and_handle_standard_logging_options, setup_standard_logging_options import sys, os, argparse, doctest logger = logging.getLogger(__name__) @@ -25,7 +25,7 @@ def parse_input(argv): parser.add_argument("caseroot", default=os.getcwd(), help="Case directory") - args = handle_standard_logging_options(argv, parser) + args = parse_args_and_handle_standard_logging_options(argv, parser) return args.caseroot diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index f17ef584e073..dd815e39dce5 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -155,7 +155,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False): logger.debug("at update TOTALPES = %s"%pestot) case.set_value("TOTALPES", pestot) thread_count = env_mach_pes.get_max_thread_count(models) - build_threaded = case.get_build_threaded() cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value("MACH")) case.set_value("COST_PES", cost_pes) From 0a70675ba4a0ef73c6e86c88fcf80ae63c8f4999 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 1 May 2017 17:13:59 -0600 Subject: [PATCH 202/219] Minor pylint fix --- scripts/lib/CIME/case_setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index f17ef584e073..dd815e39dce5 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -155,7 +155,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False): logger.debug("at update TOTALPES = %s"%pestot) case.set_value("TOTALPES", pestot) thread_count = env_mach_pes.get_max_thread_count(models) - build_threaded = case.get_build_threaded() cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value("MACH")) case.set_value("COST_PES", cost_pes) From 0e3cc7651e21201245ccaf4150415c6053a61520 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 1 May 2017 17:23:44 -0600 Subject: [PATCH 203/219] Bug fixes --- scripts/lib/CIME/XML/env_batch.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index 78ebfe39fee8..8e36bd4ea2e9 100644 --- a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -472,11 +472,11 @@ def get_queue_specs(self, queue): """ for queue_node in self.get_all_queues(): if queue_node.text == queue: - jobmin = queue.get("jobmin") - jobmax = queue.get("jobmax") - jobname = queue.get("jobname") - walltimemax = queue.get("walltimemax") - strict = 
queue.get("strict") == "true" + jobmin = queue_node.get("jobmin") + jobmax = queue_node.get("jobmax") + jobname = queue_node.get("jobname") + walltimemax = queue_node.get("walltimemax") + strict = queue_node.get("strict") == "true" return jobmin, jobmax, jobname, walltimemax, strict From c10d09722e1e53bc7376716ec8ccc172a24909e0 Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Mon, 1 May 2017 19:07:08 -0600 Subject: [PATCH 204/219] fix typo in parse_args --- config/acme/machines/template.case.run | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/acme/machines/template.case.run b/config/acme/machines/template.case.run index d85c3e15dce4..7f949dde4251 100755 --- a/config/acme/machines/template.case.run +++ b/config/acme/machines/template.case.run @@ -55,7 +55,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - CIME.utils.parse_args_and.parse_args_and_handle_standard_logging_options(args, parser) + CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) From ad309251989b0b9ebcefc9fb527996199db9f54b Mon Sep 17 00:00:00 2001 From: Mariana Vertenstein Date: Mon, 1 May 2017 22:08:53 -0600 Subject: [PATCH 205/219] removed unpack commented region --- src/components/data_comps/docn/docn_comp_mod.F90 | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/components/data_comps/docn/docn_comp_mod.F90 b/src/components/data_comps/docn/docn_comp_mod.F90 index a648f88fbaef..b2d37ce2c354 100644 --- a/src/components/data_comps/docn/docn_comp_mod.F90 +++ b/src/components/data_comps/docn/docn_comp_mod.F90 @@ -624,20 +624,6 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x) call t_stopf('docn_run1') - !-------------------- - ! UNPACK - !-------------------- - - call t_startf('docn_unpack') - ! lsize = mct_avect_lsize(x2o) - ! nflds_x2o = mct_avect_nRattr(x2o) - ! do nf=1,nflds_x2o - ! do n=1,lsize - ! ?? = x2o%rAttr(nf,n) - ! enddo - ! enddo - call t_stopf('docn_unpack') - !-------------------- ! 
ADVANCE OCN !-------------------- From 16febe7ce0dc3d5404881034de9baa62fff7c050 Mon Sep 17 00:00:00 2001 From: Michael Deakin Date: Mon, 1 May 2017 23:00:25 -0600 Subject: [PATCH 206/219] Add checks to verify the create_newcase directory was created as expected --- scripts/tests/scripts_regression_tests.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 150044905cea..b0709c98e777 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -254,6 +254,9 @@ def test_a_createnewcase(self): cls._testdirs.append(testdir) run_cmd_assert_result(self, "%s/create_newcase --case CreateNewcaseTest --script-root %s --compset X --res f19_g16 --output-root %s" % (SCRIPT_DIR, testdir, cls._testroot), from_dir=SCRIPT_DIR) + self.assertTrue(os.path.exists(testdir)) + self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) + run_cmd_assert_result(self, "./case.setup", from_dir=testdir) run_cmd_assert_result(self, "./case.build", from_dir=testdir) From 76df711e38285eed28c21aec4adc4c1cf0faca7b Mon Sep 17 00:00:00 2001 From: Jim Edwards Date: Tue, 2 May 2017 08:25:35 -0600 Subject: [PATCH 207/219] nag compiler needs a width --- src/components/data_comps/docn/docn_comp_mod.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/components/data_comps/docn/docn_comp_mod.F90 b/src/components/data_comps/docn/docn_comp_mod.F90 index b2d37ce2c354..58119151b93f 100644 --- a/src/components/data_comps/docn/docn_comp_mod.F90 +++ b/src/components/data_comps/docn/docn_comp_mod.F90 @@ -302,9 +302,9 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename ) if (ocn_mode(1:9) == 'SST_AQUAP') then ! First determine the prescribed aquaplanet option if (len_trim(ocn_mode) == 10) then - read(ocn_mode(10:10),'(i)') aquap_option + read(ocn_mode(10:10),'(i1)') aquap_option else if (len_trim(ocn_mode) == 11) then - read(ocn_mode(10:11),'(i)') aquap_option + read(ocn_mode(10:11),'(i2)') aquap_option end if ! Now remove the index from the ocn_mode value, to have a generic setting ! for use below From cf122004acb5dca257672353d1c7b70b6d2cce37 Mon Sep 17 00:00:00 2001 From: Chris Fischer Date: Tue, 2 May 2017 09:33:13 -0600 Subject: [PATCH 208/219] Update ChangeLog --- ChangeLog | 205 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 205 insertions(+) diff --git a/ChangeLog b/ChangeLog index 89037b262716..aa0351fc0c9d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,208 @@ +====================================================================== + +Originator: Chris Fischer +Date: 5-2-207 +Tag: cime5.3.0-alpha.10 +Answer Changes: [None, Round Off, Climate Changing] +Tests: scripts_regression_tests.py with PIO_VERSION set to 2 in driver config_components.xml + scripts_regression test + ERP_Ln9.f09_f09.F1850_DONOTUSE.yellowstone_intel.cam-outfrq9s_clm5 + summarize_cprnc_diffs by hand + code-checker +Dependencies: + +Brief Summary: + - Update to version 2.2.1 of the pio external. + - Update the top-level READMEs. + - New optional ndep stream from atm. + - Creation of new aquaplanet capability in DOCN. + - Fix typo in parse_args. + - All command line arguments should conform. Allowed for one exception, the -value option to xmlquery. + - ERP fix again. + - Pick up multi-instance cprnc.out files in summarize_cprnc_diffs. + - Fix an issue with pio1 determining if netcdf4 is available and removes a debug print statement. 
+ - Improve error message when config_files.xml is not found. + - Fix pylint error. + - Put explicit dot in paths in some instructions. + - Remove scripts adjustment of pio settings. + + +User interface changes: + - Now accepting only conforming command line arguments. + +PR summary: git log --oneline --first-parent [previous_tag]..master +0dcd36c Merge pull request #1447 from jedwards4b/pio2_external_update +357ad45 Merge pull request #1432 from ESMCI/rljacob/README-cleanup +c76d946 Merge pull request #1448 from ESMCI/mvertens/ndep +e5f144d Merge pull request #1451 from ESMCI/mvertens/aquap +d4dfbf1 Merge pull request #1452 from jedwards4b/acme_run_template_fix +505ce67 Merge pull request #1393 from jedwards4b/test_cli +c1e5a38 Merge pull request #1445 from jedwards4b/erp_fix_again +f6c05d1 Merge pull request #1442 from billsacks/summarize_cprnc_multiinst +71cc74e Merge pull request #1425 from jedwards4b/pio1_fixes +aa9018a Merge pull request #1440 from jedwards4b/better_msg_for_config_files_not_found +1089e53 Merge pull request #1446 from ESMCI/jgfouca/fix_pylint_err_nml +3a498d2 Merge pull request #1371 from billsacks/source_file_from_path +ae2a5ca Add Sandia Corporation to CIME LICENSE.TXT +7692745 Merge pull request #1441 from jedwards4b/pio_default_settings + + +Modified files: git diff --name-status [previous_tag] + +M LICENSE.TXT +D README +M README.md +M config/acme/machines/template.case.run +M config/acme/machines/template.case.test +M config/acme/machines/template.lt_archive +M config/acme/machines/template.st_archive +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/template.case.run +M config/cesm/machines/template.case.test +M config/cesm/machines/template.lt_archive +M config/cesm/machines/template.st_archive +M scripts/Tools/acme_check_env +M scripts/Tools/bless_test_results +M scripts/Tools/case.build +M scripts/Tools/case.cmpgen_namelists +M scripts/Tools/case.setup +M scripts/Tools/case.submit +M scripts/Tools/case_diff +M scripts/Tools/check_case +M scripts/Tools/check_input_data +M scripts/Tools/check_lockedfiles +M scripts/Tools/cime_bisect +M scripts/Tools/code_checker +M scripts/Tools/compare_namelists +M scripts/Tools/compare_test_results +M scripts/Tools/component_compare_baseline +M scripts/Tools/component_compare_copy +M scripts/Tools/component_compare_test +M scripts/Tools/component_generate_baseline +M scripts/Tools/getTiming +M scripts/Tools/jenkins_generic_job +M scripts/Tools/list_acme_tests +M scripts/Tools/normalize_cases +M scripts/Tools/pelayout +M scripts/Tools/preview_namelists +M scripts/Tools/save_provenance +M scripts/Tools/simple_compare +M scripts/Tools/testreporter.py +M scripts/Tools/update_acme_tests +M scripts/Tools/wait_for_tests +M scripts/Tools/xmlchange +M scripts/Tools/xmlconvertors/config_pes_converter.py +M scripts/Tools/xmlconvertors/grid_xml_converter.py +M scripts/Tools/xmlquery +M scripts/create_clone +M scripts/create_newcase +M scripts/create_test +M scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/SystemTests/erp.py +M scripts/lib/CIME/SystemTests/seq.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/files.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/buildnml.py +M scripts/lib/CIME/case.py +M scripts/lib/CIME/case_setup.py +M scripts/lib/CIME/namelist.py +M scripts/lib/CIME/utils.py +M scripts/manage_case +M scripts/manage_pes +M scripts/query_testlists +M scripts/tests/scripts_regression_tests.py +M 
src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.pio +M src/components/data_comps/docn/cime_config/buildnml +M src/components/data_comps/docn/cime_config/config_component.xml +M src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +M src/components/data_comps/docn/docn_comp_mod.F90 +M src/drivers/mct/cime_config/namelist_definition_drv_flds.xml +M src/drivers/mct/shr/seq_flds_mod.F90 +A src/drivers/mct/shr/shr_ndep_mod.F90 +M src/externals/pio1/pio/CMakeLists.txt +M src/externals/pio1/pio/pionfput_mod.F90.in +M src/externals/pio2/CMakeLists.txt +M src/externals/pio2/CTestScript.cmake +M src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake +M src/externals/pio2/ctest/CTestEnvironment-nwsc.cmake +A src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh +M src/externals/pio2/ctest/runcdash-cgd-nag.sh +M src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh +M src/externals/pio2/ctest/runcdash-nwscla-intel.sh +M src/externals/pio2/ctest/runctest-cgd.sh +M src/externals/pio2/ctest/runctest-nwscla.sh +M src/externals/pio2/doc/source/contributing_code.txt +M src/externals/pio2/examples/c/CMakeLists.txt +A src/externals/pio2/examples/c/darray_async.c +A src/externals/pio2/examples/c/darray_no_async.c +M src/externals/pio2/examples/c/example1.c +M src/externals/pio2/src/clib/CMakeLists.txt +M src/externals/pio2/src/clib/bget.c +M src/externals/pio2/src/clib/config.h.in +M src/externals/pio2/src/clib/pio.h +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_darray_int.c +M src/externals/pio2/src/clib/pio_file.c +M src/externals/pio2/src/clib/pio_get_nc.c +M src/externals/pio2/src/clib/pio_getput_int.c +M src/externals/pio2/src/clib/pio_internal.h +M src/externals/pio2/src/clib/pio_lists.c +M src/externals/pio2/src/clib/pio_msg.c +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pio_nc4.c +M src/externals/pio2/src/clib/pio_put_nc.c +M src/externals/pio2/src/clib/pio_rearrange.c +M src/externals/pio2/src/clib/pio_spmd.c +M src/externals/pio2/src/clib/pio_varm.c +M src/externals/pio2/src/clib/pioc.c +M src/externals/pio2/src/clib/pioc_sc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/tests/cunit/CMakeLists.txt +M src/externals/pio2/tests/cunit/pio_tests.h +D src/externals/pio2/tests/cunit/test_async_2comp.c +M src/externals/pio2/tests/cunit/test_async_3proc.c +M src/externals/pio2/tests/cunit/test_async_4proc.c +M src/externals/pio2/tests/cunit/test_async_simple.c +M src/externals/pio2/tests/cunit/test_darray_1d.c +M src/externals/pio2/tests/cunit/test_darray_3d.c +A src/externals/pio2/tests/cunit/test_darray_multi.c +M src/externals/pio2/tests/cunit/test_darray_multivar.c +A src/externals/pio2/tests/cunit/test_darray_multivar2.c +A src/externals/pio2/tests/cunit/test_decomp_uneven.c +M src/externals/pio2/tests/cunit/test_decomps.c +M src/externals/pio2/tests/cunit/test_intercomm2.c +M src/externals/pio2/tests/cunit/test_iosystem2_simple.c +M src/externals/pio2/tests/cunit/test_pioc.c +M src/externals/pio2/tests/cunit/test_pioc_fill.c +A src/externals/pio2/tests/cunit/test_rearr.c +M src/externals/pio2/tests/cunit/test_shared.c +M src/externals/pio2/tests/cunit/test_spmd.c +M src/externals/pio2/tests/general/ncdf_get_put.F90.in +M src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in +M src/externals/pio2/tests/general/util/pio_tutil.F90 +M src/share/util/shr_pio_mod.F90 +M src/share/util/shr_strdata_mod.F90 +M tools/configure +M tools/cprnc/README +M 
tools/cprnc/summarize_cprnc_diffs +M tools/mapping/check_maps/.gitignore +M tools/mapping/check_maps/README +M tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90 +M tools/mapping/check_maps/src/Makefile +M tools/mapping/gen_domain_files/INSTALL +M tools/mapping/gen_domain_files/src/Makefile +M tools/mapping/gen_mapping_files/README +A tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/.gitignore +M tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README +M tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh +M tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL +M tools/mapping/map_field/INSTALL +M tools/mapping/map_field/src/Makefile + +====================================================================== ====================================================================== From 4379cad13a73cab5dd8467b0e90d1dcf2e15b8b5 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 2 May 2017 10:49:10 -0600 Subject: [PATCH 209/219] Do not override walltime unless test --- scripts/create_test | 1 - scripts/lib/CIME/XML/env_batch.py | 16 ++++++++++------ scripts/lib/CIME/case.py | 3 ++- scripts/lib/CIME/test_scheduler.py | 1 - 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/scripts/create_test b/scripts/create_test index f55dfe47131d..2b8643101b57 100755 --- a/scripts/create_test +++ b/scripts/create_test @@ -20,7 +20,6 @@ import argparse, math, glob logger = logging.getLogger(__name__) - ############################################################################### def parse_command_line(args, description): ############################################################################### diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index 8e36bd4ea2e9..6385be121b2b 100644 --- a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -181,7 +181,7 @@ def make_batch_script(self, input_template, job, case, total_tasks, tasks_per_no fd.write(output_text) os.chmod(job, os.stat(job).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) - def set_job_defaults(self, batch_jobs, pesize=None, walltime=None, force_queue=None): + def set_job_defaults(self, batch_jobs, pesize=None, walltime=None, force_queue=None, allow_walltime_override=False): if self.batchtype is None: self.batchtype = self.get_batch_system_type() @@ -197,21 +197,25 @@ def set_job_defaults(self, batch_jobs, pesize=None, walltime=None, force_queue=N if force_queue: if not self.queue_meets_spec(force_queue, task_count, walltime=walltime, job=job): - logger.warning("User-request queue '%s' does not meet requirements for job '%s'" % (force_queue, job)) + logger.warning("WARNING: User-requested queue '%s' does not meet requirements for job '%s'" % (force_queue, job)) else: queue = self.select_best_queue(task_count, walltime=walltime, job=job) if queue is None and walltime is not None: # Try to see if walltime was the holdup queue = self.select_best_queue(task_count, walltime=None, job=job) if queue is not None: - # It was, override the walltime to avoid failure + # It was, override the walltime if a test, otherwise just warn the user new_walltime = self.get_queue_specs(queue)[3] expect(new_walltime is not None, "Should never make it here") - logger.warning("Requested walltime '%s' could not be matched by any queue, using '%s' instead" % (walltime, new_walltime)) - walltime = new_walltime + logger.warning("WARNING: Requested walltime '%s' could not be matched by any queue" % walltime) + if allow_walltime_override: + logger.warning(" 
Using walltime '%s' instead" % new_walltime) + walltime = new_walltime + else: + logger.warning(" Continuing with suspect walltime, batch submission may fail") if queue is None: - logger.warning("No queue on this system met the requirements for this job. Falling back to defaults") + logger.warning("WARNING: No queue on this system met the requirements for this job. Falling back to defaults") default_queue_node = self.get_default_queue() queue = default_queue_node.text walltime = self.get_queue_specs(queue)[3] diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 9e143418fd55..07d8fde0d60c 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -741,7 +741,7 @@ def configure(self, compset_name, grid_name, machine_name=None, env_batch.set_batch_system(batch, batch_system_type=batch_system_type) env_batch.create_job_groups(bjobs) - env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime, force_queue=queue) + env_batch.set_job_defaults(bjobs, pesize=maxval, walltime=walltime, force_queue=queue, allow_walltime_override=test) self.schedule_rewrite(env_batch) #-------------------------------------------- @@ -810,6 +810,7 @@ def configure(self, compset_name, grid_name, machine_name=None, if model == "cesm" and not test: self.set_value("DOUT_S",True) self.set_value("TIMER_LEVEL", 4) + if test: self.set_value("TEST",True) diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index 7de420c615fd..a884e17550da 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -405,7 +405,6 @@ def _create_newcase_phase(self, test): create_newcase_cmd += " --mpilib %s" % self._mpilib logger.debug (" MPILIB set to %s" % self._mpilib) - if self._queue is not None: create_newcase_cmd += " --queue=%s" % self._queue From dc84643fde623a3f8e59103b06813967dc524ff2 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 2 May 2017 12:01:35 -0600 Subject: [PATCH 210/219] Add tests --- scripts/tests/scripts_regression_tests.py | 84 +++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 7dac780e46d2..535d6cf48999 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -1402,6 +1402,90 @@ def test_cime_case_xmlchange_append(self): result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir) self.assertEqual(result, "-opt1 -opt2") + ########################################################################### + def test_cime_case_test_walltime_mgmt_1(self): + ########################################################################### + if CIME.utils.get_model() != "acme": + self.skipTest("Skipping walltime test. 
Depends on ACME batch settings") + + test_name = "ERS.f19_g16_rx1.A" + machine, compiler = "blues", "gnu" + run_cmd_assert_result(self, "unset CIME_GLOBAL_WALLTIME && %s/create_test --no-setup --machine %s %s -t %s --test-root %s --output-root %s" % + (SCRIPT_DIR, machine, test_name, self._baseline_name, self._testroot, self._testroot)) + + casedir = os.path.join(self._testroot, + "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) + self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) + + result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) + self.assertEqual(result, "0:10:00") + + result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) + self.assertEqual(result, "shared") + + ########################################################################### + def test_cime_case_test_walltime_mgmt_2(self): + ########################################################################### + if CIME.utils.get_model() != "acme": + self.skipTest("Skipping walltime test. Depends on ACME batch settings") + + test_name = "ERS_P64.f19_g16_rx1.A" + machine, compiler = "blues", "gnu" + run_cmd_assert_result(self, "unset CIME_GLOBAL_WALLTIME && %s/create_test --no-setup --machine %s %s -t %s --test-root %s --output-root %s" % + (SCRIPT_DIR, machine, test_name, self._baseline_name, self._testroot, self._testroot)) + + casedir = os.path.join(self._testroot, + "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) + self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) + + result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) + self.assertEqual(result, "03:00:00") + + result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) + self.assertEqual(result, "batch") + + ########################################################################### + def test_cime_case_test_walltime_mgmt_3(self): + ########################################################################### + if CIME.utils.get_model() != "acme": + self.skipTest("Skipping walltime test. 
Depends on ACME batch settings") + + test_name = "ERS_P64.f19_g16_rx1.A" + machine, compiler = "blues", "gnu" + run_cmd_assert_result(self, "unset CIME_GLOBAL_WALLTIME && %s/create_test --no-setup --machine %s %s -t %s --test-root %s --output-root %s --walltime='0:10:00'" % + (SCRIPT_DIR, machine, test_name, self._baseline_name, self._testroot, self._testroot)) + + casedir = os.path.join(self._testroot, + "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) + self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) + + result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) + self.assertEqual(result, "0:10:00") + + result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) + self.assertEqual(result, "batch") # Not smart enough to select faster queue + + ########################################################################### + def test_cime_case_test_walltime_mgmt_4(self): + ########################################################################### + if CIME.utils.get_model() != "acme": + self.skipTest("Skipping walltime test. Depends on ACME batch settings") + + test_name = "ERS_P1.f19_g16_rx1.A" + machine, compiler = "blues", "gnu" + run_cmd_assert_result(self, "unset CIME_GLOBAL_WALLTIME && %s/create_test --no-setup --machine %s %s -t %s --test-root %s --output-root %s --walltime='2:00:00'" % + (SCRIPT_DIR, machine, test_name, self._baseline_name, self._testroot, self._testroot)) + + casedir = os.path.join(self._testroot, + "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) + self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) + + result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) + self.assertEqual(result, "01:00:00") + + result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) + self.assertEqual(result, "shared") + ############################################################################### class X_TestSingleSubmit(TestCreateTestCommon): ############################################################################### From 26f0ffc5f4df84cff947d2a890ecc7e1e585e597 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 2 May 2017 10:10:34 -0600 Subject: [PATCH 211/219] Fix single submit --- scripts/create_test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/create_test b/scripts/create_test index 2b8643101b57..a8a75ce41543 100755 --- a/scripts/create_test +++ b/scripts/create_test @@ -420,7 +420,7 @@ def single_submit_impl(machine_name, test_id, proc_pool, project, args, job_cost wall_time_bab = wall_time queue = env_batch.select_best_queue(proc_pool, wall_time_bab) - wall_time_max_bab = env_batch.get_max_walltime(queue) + wall_time_max_bab = env_batch.get_queue_specs(queue)[3] if wall_time_max_bab is not None: wall_time_max = convert_to_seconds(wall_time_max_bab) if wall_time_max < wall_time: From a2caece71cd240dff15a51f6886eab6fcb47ef80 Mon Sep 17 00:00:00 2001 From: Andy Salinger Date: Tue, 2 May 2017 15:16:23 -0600 Subject: [PATCH 212/219] Revert change in pio1 to point into pio2/cmake A change was made in pio1 to no longer point into pio2/cmake for the build system. Good idea, but it broke the build of the HOMME test in acme_developer. 
Reverting it for now, and alerted jayesh to the issue.

---
 cime/src/externals/pio1/pio/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cime/src/externals/pio1/pio/CMakeLists.txt b/cime/src/externals/pio1/pio/CMakeLists.txt
index d2c162bb71d1..c9fd1bf503d1 100644
--- a/cime/src/externals/pio1/pio/CMakeLists.txt
+++ b/cime/src/externals/pio1/pio/CMakeLists.txt
@@ -9,7 +9,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8.5)
 IF (USER_CMAKE_MODULE_PATH)
   SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${USER_CMAKE_MODULE_PATH})
 ELSE()
-  SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
+  SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../pio2/cmake")
 ENDIF()
 find_file( TESTFILE NAMES TryCSizeOf.f90 PATHS ${CMAKE_MODULE_PATH} NO_DEFAULT_PATH)
 get_filename_component( TESTFILEPATH ${TESTFILE} PATH)

From 00bfdfe492332da897af1cffce46df1df21d5476 Mon Sep 17 00:00:00 2001
From: Andy Salinger
Date: Wed, 3 May 2017 11:52:56 -0600
Subject: [PATCH 213/219] Comment out invalid_args check

CIME added this, but ACME is not ready. Too many occurrences of "-id" with single dash

---
 cime/scripts/lib/CIME/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cime/scripts/lib/CIME/utils.py b/cime/scripts/lib/CIME/utils.py
index 6d79bce05438..21bba173d3fd 100644
--- a/cime/scripts/lib/CIME/utils.py
+++ b/cime/scripts/lib/CIME/utils.py
@@ -662,7 +662,7 @@ def parse_args_and_handle_standard_logging_options(args, parser=None):

     # scripts_regression_tests is the only thing that should pass a None argument in parser
     if parser is not None:
-        _check_for_invalid_args(args[1:])
+        #_check_for_invalid_args(args[1:])
         args = parser.parse_args(args[1:])

     # --verbose adds to the message format but does not impact the log level

From 89f800a15eb5570cb559e27f08ca5b0feaf16211 Mon Sep 17 00:00:00 2001
From: Andy Salinger
Date: Wed, 3 May 2017 11:53:49 -0600
Subject: [PATCH 214/219] Update Sandia workstations to gcc5.3.0

---
 cime/config/acme/machines/config_machines.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cime/config/acme/machines/config_machines.xml b/cime/config/acme/machines/config_machines.xml
index a35d28681082..5e72b3c08c64 100644
--- a/cime/config/acme/machines/config_machines.xml
+++ b/cime/config/acme/machines/config_machines.xml
@@ -607,7 +607,7 @@
     sems-python/2.7.9
-    sems-gcc/5.1.0
+    sems-gcc/5.3.0
     sems-intel/15.0.2

From c063d8111ae3bee3eccb69f752e92226e2b49f12 Mon Sep 17 00:00:00 2001
From: Andy Salinger
Date: Wed, 3 May 2017 17:24:44 -0600
Subject: [PATCH 215/219] Fix acme template change for new parser

JimF figured this out for me. JimE made changes in CIME but forgot to do the ACME parts.
--- cime/config/acme/machines/template.case.run | 2 +- cime/config/acme/machines/template.case.test | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cime/config/acme/machines/template.case.run b/cime/config/acme/machines/template.case.run index 7f949dde4251..56d425e34013 100755 --- a/cime/config/acme/machines/template.case.run +++ b/cime/config/acme/machines/template.case.run @@ -55,7 +55,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) diff --git a/cime/config/acme/machines/template.case.test b/cime/config/acme/machines/template.case.test index 81ccab9cbd15..5fbb6b5c35f0 100755 --- a/cime/config/acme/machines/template.case.test +++ b/cime/config/acme/machines/template.case.test @@ -50,7 +50,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--caseroot", help="Case directory to build") - CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.caseroot is not None: os.chdir(args.caseroot) From 7616e0e74d42f2367028d04aa5acf13d9a3b7480 Mon Sep 17 00:00:00 2001 From: Andy Salinger Date: Wed, 3 May 2017 17:25:46 -0600 Subject: [PATCH 216/219] Make single-dash before multichar arg a warning Moving towards deprecating -multichararg in favor of --multichararg with double dash. This was ignored in the past, and recently made an error in CIME. Backing off to a warning. --- cime/scripts/lib/CIME/utils.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cime/scripts/lib/CIME/utils.py b/cime/scripts/lib/CIME/utils.py index 21bba173d3fd..57a28addbe31 100644 --- a/cime/scripts/lib/CIME/utils.py +++ b/cime/scripts/lib/CIME/utils.py @@ -662,7 +662,7 @@ def parse_args_and_handle_standard_logging_options(args, parser=None): # scripts_regression_tests is the only thing that should pass a None argument in parser if parser is not None: - #_check_for_invalid_args(args[1:]) + _check_for_invalid_args(args[1:]) args = parser.parse_args(args[1:]) # --verbose adds to the message format but does not impact the log level @@ -1241,10 +1241,11 @@ def _check_for_invalid_args(args): if arg.startswith("--"): continue if arg.startswith("-") and len(arg) > 2: - if arg == "-value" or arg == "-noecho": +# Uncomment these lines when we want to enforce --mulitchararg syntax +# if arg == "-value" or arg == "-noecho": logger.warn("This argument is depricated, please use -%s"%arg) - else: - expect(False, "Invalid argument %s\n Multi-character arguments should begin with \"--\" and single character with \"-\"\n Use --help for a complete list of available options"%arg) +# else: +# expect(False, "Invalid argument %s\n Multi-character arguments should begin with \"--\" and single character with \"-\"\n Use --help for a complete list of available options"%arg) class SharedArea(object): """ From d0a10418b237969fb431b4761b5f7fc4046b8f2f Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 16 May 2017 10:33:27 -0600 Subject: [PATCH 217/219] Bug fix: Handle failures to get mem usage from baselines --- cime/scripts/lib/CIME/SystemTests/system_tests_common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cime/scripts/lib/CIME/SystemTests/system_tests_common.py 
b/cime/scripts/lib/CIME/SystemTests/system_tests_common.py index aadaaa5ef2e6..a690386d4744 100644 --- a/cime/scripts/lib/CIME/SystemTests/system_tests_common.py +++ b/cime/scripts/lib/CIME/SystemTests/system_tests_common.py @@ -365,7 +365,8 @@ def _compare_baseline(self): baselog = os.path.join(basecmp_dir, "cpl.log") if os.path.isfile(baselog) and len(memlist) > 3: - blmem = self._get_mem_usage(baselog)[-1][1] + blmem = self._get_mem_usage(baselog) + blmem = 0 if blmem == [] else blmem[-1][1] curmem = memlist[-1][1] if blmem != 0: diff = (curmem - blmem) / blmem From 91c202e6cab59dcdc124dfc2f66889ec2661c46f Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 17 May 2017 11:01:46 -0600 Subject: [PATCH 218/219] Fix upstream merge resolution mistake --- cime/config/acme/machines/config_batch.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cime/config/acme/machines/config_batch.xml b/cime/config/acme/machines/config_batch.xml index cf3e928fef19..aaefcf270e05 100644 --- a/cime/config/acme/machines/config_batch.xml +++ b/cime/config/acme/machines/config_batch.xml @@ -158,7 +158,7 @@ -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - shared + shared batch From 0540b7f593d98884ec5ec66c9cd91bc796124576 Mon Sep 17 00:00:00 2001 From: Robert Jacob Date: Wed, 17 May 2017 18:07:51 -0500 Subject: [PATCH 219/219] Add fix to bug in docn that was in this cime version The version of CIME we started with (15297cd) had a bug in docn which was fixed in a later version (10fbc43). Instead of starting over, just bring in the 2 files that fix the bug. Bug spotted by baseline compare fails with DTEST. --- .../docn/cime_config/config_component.xml | 31 +++++----- .../data_comps/docn/docn_comp_mod.F90 | 56 +++++++++++++++---- 2 files changed, 61 insertions(+), 26 deletions(-) diff --git a/cime/src/components/data_comps/docn/cime_config/config_component.xml b/cime/src/components/data_comps/docn/cime_config/config_component.xml index 1b0d88186967..ff71aa17f1b2 100644 --- a/cime/src/components/data_comps/docn/cime_config/config_component.xml +++ b/cime/src/components/data_comps/docn/cime_config/config_component.xml @@ -41,7 +41,7 @@ driver. The atmosphere/ocean fluxes are computed in the coupler. Therefore, the data ocean model does not compute fluxes like the data ice model. DOCN has two distinct modes of operation. It can - arun as a pure data model, reading in ocean SSTs (normally + run as a pure data model, reading in ocean SSTs (normally climatological) from input datasets, performing time/spatial interpolations, and passing these to the coupler. Alternatively, DOCN can compute updated SSTs by running as a slab ocean model where @@ -50,18 +50,14 @@ from the driver. --- A setting of prescribed assumes the only field in the input stream is SST. It also assumes the SST is in Celsius and must be converted to Kelvin. - All other fields are set to zero except for ocean salinity, which - is set to a constant reference salinity value. - Normally the ice fraction data is found in the same data files that - provide SST data to the data ocean model. They are normally found in - the same file because the SST and ice fraction data are derived from - the same observational data sets and are consistent with each other. - to the data ocean model. They are normally found in the same file - because the SST and ice fraction data are derived from the same - observational data sets and are consistent with each other. - --- A setting of som (slab ocean model) mode is a prognostic mode. 
From 91c202e6cab59dcdc124dfc2f66889ec2661c46f Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Wed, 17 May 2017 11:01:46 -0600
Subject: [PATCH 218/219] Fix upstream merge resolution mistake
---
 cime/config/acme/machines/config_batch.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cime/config/acme/machines/config_batch.xml b/cime/config/acme/machines/config_batch.xml
index cf3e928fef19..aaefcf270e05 100644
--- a/cime/config/acme/machines/config_batch.xml
+++ b/cime/config/acme/machines/config_batch.xml
@@ -158,7 +158,7 @@
       -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}
-      shared
+      shared
       batch

From 0540b7f593d98884ec5ec66c9cd91bc796124576 Mon Sep 17 00:00:00 2001
From: Robert Jacob
Date: Wed, 17 May 2017 18:07:51 -0500
Subject: [PATCH 219/219] Add fix to bug in docn that was in this cime version

The version of CIME we started with (15297cd) had a bug in docn which
was fixed in a later version (10fbc43). Instead of starting over, just
bring in the two files that fix the bug. The bug was spotted by
baseline compare failures with DTEST.
---
 .../docn/cime_config/config_component.xml | 31 +++++-----
 .../data_comps/docn/docn_comp_mod.F90     | 56 +++++++++++++++----
 2 files changed, 61 insertions(+), 26 deletions(-)

diff --git a/cime/src/components/data_comps/docn/cime_config/config_component.xml b/cime/src/components/data_comps/docn/cime_config/config_component.xml
index 1b0d88186967..ff71aa17f1b2 100644
--- a/cime/src/components/data_comps/docn/cime_config/config_component.xml
+++ b/cime/src/components/data_comps/docn/cime_config/config_component.xml
@@ -41,7 +41,7 @@
     driver. The atmosphere/ocean fluxes are computed in the coupler.
     Therefore, the data ocean model does not compute fluxes like the
     data ice model. DOCN has two distinct modes of operation. It can
-    arun as a pure data model, reading in ocean SSTs (normally
+    run as a pure data model, reading in ocean SSTs (normally
     climatological) from input datasets, performing time/spatial
     interpolations, and passing these to the coupler. Alternatively,
     DOCN can compute updated SSTs by running as a slab ocean model where
@@ -50,18 +50,14 @@
     from the driver.
    --- A setting of prescribed assumes the only field in the input stream
    is SST. It also assumes the SST is in Celsius and must be converted to Kelvin.
-   All other fields are set to zero except for ocean salinity, which
-   is set to a constant reference salinity value.
-   Normally the ice fraction data is found in the same data files that
-   provide SST data to the data ocean model. They are normally found in
-   the same file because the SST and ice fraction data are derived from
-   the same observational data sets and are consistent with each other.
-   to the data ocean model. They are normally found in the same file
-   because the SST and ice fraction data are derived from the same
-   observational data sets and are consistent with each other.
-   --- A setting of som (slab ocean model) mode is a prognostic mode. This mode
-   computes a prognostic sea surface temperature and a freeze/melt
-   potential (surface Q-flux) used by the sea ice model. This
+   All other fields are set to zero except for ocean salinity, which is set to a
+   constant reference salinity value. Normally the ice fraction data is found in
+   the same data files that provide SST data to the data ocean model. They are
+   normally found in the same file because the SST and ice fraction data are derived
+   from the same observational data sets and are consistent with each other.
+   --- Settings of som (slab ocean model) or som_aquap (aquaplanet slab ocean) are
+   prognostic modes which compute a prognostic sea surface temperature and a
+   freeze/melt potential (surface Q-flux) used by the sea ice model. This
    calculation requires an external SOM forcing data file that includes
    ocean mixed layer depths and bottom-of-the-slab Q-fluxes.
    Scientifically appropriate bottom-of-the-slab Q-fluxes are normally
@@ -71,7 +67,14 @@
    appropriate and is provided for testing and development purposes
    only. Users must create scientifically appropriate data for their
    particular application. A tool is available to derive valid SOM forcing.
-   Default is prescribed.
+   --- A setting of sst_aquapN (where "N" is an integer index value) is a
+   type of prescribed SST mode used specifically for an aquaplanet setup in
+   which global SSTs correspond to an analytic form set by the index value.
+   Currently, indices for 10 SST profiles are supported [e.g., index 3 corresponds
+   to the "QOBS" profile of Neale and Hoskins (2001, Atmos. Sci. Lett.)].
+   With source code modifications, it is possible for users to create their own
+   analytic SST distributions and match them to indices 11 or greater.
+
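[Editor's note: the docn_comp_mod.F90 change below duplicates the SOM slab-ocean update for the new SOM_AQUAP case. As a rough standalone Python sketch of the per-cell mixed-layer energy budget it implements (the constant values here are approximate stand-ins for the shr_const parameters used by the real Fortran code, and slab_update is an illustrative name, not a DOCN routine):]

    # Rough sketch of the slab-ocean (SOM) per-cell update mirrored into the
    # SOM_AQUAP branch below. Constants are approximate stand-ins.
    CPSW   = 3.996e3    # specific heat of sea water, J/kg/K (approximate)
    RHOSW  = 1.026e3    # density of sea water, kg/m^3 (approximate)
    LATICE = 3.337e5    # latent heat of fusion, J/kg (approximate)

    def slab_update(t_old, fluxes, qbot, snow, rofi, h, dt, t_freeze):
        """Advance slab temperature one coupling step; return (t_new, q_freeze_melt).

        fluxes: net shortwave + longwave + sensible + latent + ice-melt heat (W/m^2)
        qbot:   prescribed flux through the bottom of the slab (W/m^2)
        snow, rofi: snow and ice-runoff water fluxes (kg/m^2/s), which remove latent heat
        h: mixed-layer depth (m); dt: timestep (s); t_freeze: local freezing point (K)
        """
        heating = fluxes - qbot - (snow + rofi) * LATICE      # net W/m^2 into the slab
        t_new = t_old + heating * dt / (CPSW * RHOSW * h)     # heat capacity of the column
        q = (t_freeze - t_new) * (CPSW * RHOSW * h) / dt      # q > 0 means ice would form
        return t_new, q

    if __name__ == "__main__":
        # one step with a 50 m mixed layer, 1800 s coupling step, modest net cooling
        print(slab_update(t_old=274.0, fluxes=-40.0, qbot=5.0, snow=0.0,
                          rofi=0.0, h=50.0, dt=1800.0, t_freeze=271.35))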
diff --git a/cime/src/components/data_comps/docn/docn_comp_mod.F90 b/cime/src/components/data_comps/docn/docn_comp_mod.F90
index 58119151b93f..f557311f7f2b 100644
--- a/cime/src/components/data_comps/docn/docn_comp_mod.F90
+++ b/cime/src/components/data_comps/docn/docn_comp_mod.F90
@@ -79,6 +79,7 @@ module docn_comp_mod
   integer(IN) :: kt,ks,ku,kv,kdhdx,kdhdy,kq,kswp ! field indices
   integer(IN) :: kswnet,klwup,klwdn,ksen,klat,kmelth,ksnow,krofi
   integer(IN) :: kh,kqbot
+  integer(IN) :: index_lat, index_lon

   type(shr_strdata_type) :: SDOCN
   type(mct_rearr) :: rearr
@@ -148,8 +149,6 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename )
   integer(IN) :: nunit ! unit number
   integer(IN) :: kmask ! field reference
   integer(IN) :: kfrac ! field reference
-  integer(IN) :: klat ! field reference
-  integer(IN) :: klon ! field reference
   logical :: ocn_present ! flag
   logical :: ocn_prognostic ! flag
   logical :: ocnrof_prognostic ! flag
@@ -360,7 +359,7 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename )
      ocnrof_prognostic = .true.
   endif

-  if (trim(ocn_mode) == 'SOM') then
+  if (trim(ocn_mode) == 'SOM' .or. trim(ocn_mode) == 'SOM_AQUAP') then
     ocn_prognostic = .true.
  endif

@@ -466,11 +465,11 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename )
   kmask = mct_aVect_indexRA(ggrid%data,'mask')
   imask(:) = nint(ggrid%data%rAttr(kmask,:))

-  klon = mct_aVect_indexRA(ggrid%data,'lon')
-  xc(:) = ggrid%data%rAttr(klon,:)
+  index_lon = mct_aVect_indexRA(ggrid%data,'lon')
+  xc(:) = ggrid%data%rAttr(index_lon,:)

-  klat = mct_aVect_indexRA(ggrid%data,'lat')
-  yc(:) = ggrid%data%rAttr(klat,:)
+  index_lat = mct_aVect_indexRA(ggrid%data,'lat')
+  yc(:) = ggrid%data%rAttr(index_lat,:)

   call t_stopf('docn_initmctavs')

@@ -508,7 +507,7 @@ subroutine docn_comp_init( EClock, cdata, x2o, o2x, NLFilename )
        endif
     endif
     call shr_mpi_bcast(exists,mpicom,'exists')
-    if (trim(ocn_mode) == 'SOM') then
+    if (trim(ocn_mode) == 'SOM' .or. trim(ocn_mode) == 'SOM_AQUAP') then
       if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file)
       call shr_pcdf_readwrite('read',iosystem,SDOCN%io_type,trim(rest_file),mpicom,gsmap,rf1=somtp,rf1n='somtp')
    endif
@@ -705,7 +704,7 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x)
           o2x%rAttr(kswp ,n) = swp
        enddo

-     case('SOM','SOM_AQUAP')
+     case('SOM')
        lsize = mct_avect_lsize(o2x)
        do n = 1,SDOCN%nstreams
           call shr_dmodel_translateAV(SDOCN%avs(n),avstrm,avifld,avofld,rearr)
@@ -743,6 +742,41 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x)
           enddo
        endif ! firstcall

+     case('SOM_AQUAP')
+        lsize = mct_avect_lsize(o2x)
+        do n = 1,SDOCN%nstreams
+           call shr_dmodel_translateAV(SDOCN%avs(n),avstrm,avifld,avofld,rearr)
+        enddo
+        if (firstcall) then
+           do n = 1,lsize
+              if (.not. read_restart) then
+                 somtp(n) = o2x%rAttr(kt,n) + TkFrz
+              endif
+              o2x%rAttr(kt,n) = somtp(n)
+              o2x%rAttr(kq,n) = 0.0_R8
+           enddo
+        else ! firstcall
+           tfreeze = shr_frz_freezetemp(o2x%rAttr(ks,:)) + TkFrz
+           do n = 1,lsize
+              !--- pull out h from av for resuse below ---
+              hn = avstrm%rAttr(kh,n)
+              !--- compute new temp ---
+              o2x%rAttr(kt,n) = somtp(n) + &
+                 (x2o%rAttr(kswnet,n) + &  ! shortwave
+                  x2o%rAttr(klwup ,n) + &  ! longwave
+                  x2o%rAttr(klwdn ,n) + &  ! longwave
+                  x2o%rAttr(ksen ,n)  + &  ! sensible
+                  x2o%rAttr(klat ,n)  + &  ! latent
+                  x2o%rAttr(kmelth,n) - &  ! ice melt
+                  avstrm%rAttr(kqbot ,n) - &  ! flux at bottom
+                  (x2o%rAttr(ksnow,n)+x2o%rAttr(krofi,n))*latice) * &  ! latent by prec and roff
+                  dt/(cpsw*rhosw*hn)
+              !--- compute ice formed or melt potential ---
+              o2x%rAttr(kq,n) = (tfreeze(n) - o2x%rAttr(kt,n))*(cpsw*rhosw*hn)/dt ! ice formed q>0
+              somtp(n) = o2x%rAttr(kt,n) ! save temp
+           enddo
+        endif ! firstcall
+
      end select

      call t_stopf('docn_mode')
@@ -764,7 +798,7 @@ subroutine docn_comp_run( EClock, cdata, x2o, o2x)
        close(nu)
        call shr_file_freeUnit(nu)
     endif
-    if (trim(ocn_mode) == 'SOM') then
+    if (trim(ocn_mode) == 'SOM' .or. trim(ocn_mode) == 'SOM_AQUAP') then
       if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file),currentYMD,currentTOD
       call shr_pcdf_readwrite('write',iosystem,SDOCN%io_type,trim(rest_file),mpicom,gsmap,clobber=.true., &
            rf1=somtp,rf1n='somtp')
@@ -879,8 +913,6 @@ subroutine prescribed_sst(xc, yc, lsize, sst_option, sst)
    rlon(:) = xc(:) * pio180
    rlat(:) = yc(:) * pio180

-   write(6,*)"DEBUG: sst_option is ",sst_option
-
    ! Control
    if (sst_option < 1 .or. sst_option > 10) then