diff --git a/cime/ChangeLog b/cime/ChangeLog
index ff6ed05da496..3fda9fdf079c 100644
--- a/cime/ChangeLog
+++ b/cime/ChangeLog
@@ -1,5 +1,72 @@
======================================================================
+Originator: Chris Fischer
+Date: 7-19-2019
+Tag: cime5.8.7
+Answer Changes: Climate changing for CAM ne120, ne120pg3, ne0CONUS grids
+Tests: scripts_regression_tests
+Dependencies:
+
+Brief Summary:
+ - Fix SE to gx1v7 domain and mapping files.
+ - Updates and fixes for MOM6 in cime.
+ - Maint 5.6 merge.
+
+User interface changes:
+
+PR summary: git log --oneline --first-parent [previous_tag]..master
+6efe21745 Merge pull request #3177 from ESMCI/fischer/SE_gx1v7MaskFix
+764ef08a0 Merge pull request #3156 from ESMCI/nuopc-cmeps-os
+7a5d60356 Merge pull request #3174 from jedwards4b/maint-5.6-merge
+
+Modified files: git diff --name-status [previous_tag]
+M config/cesm/config_files.xml
+M config/cesm/config_grids.xml
+M config/cesm/config_grids_mct.xml
+M config/cesm/machines/config_batch.xml
+M config/cesm/machines/config_compilers.xml
+M config/cesm/machines/config_machines.xml
+A config/cesm/machines/config_workflow.xml
+A config/cesm/machines/cylc_suite.rc.template
+M config/config_headers.xml
+M config/e3sm/config_files.xml
+M config/e3sm/machines/config_batch.xml
+A config/e3sm/machines/config_workflow.xml
+M config/xml_schemas/config_batch.xsd
+A config/xml_schemas/config_workflow.xsd
+M scripts/Tools/Makefile
+M scripts/Tools/archive_metadata
+A scripts/Tools/generate_cylc_workflow.py
+M scripts/Tools/mkDepends
+M scripts/Tools/preview_run
+M scripts/create_clone
+M scripts/create_newcase
+M scripts/lib/CIME/Servers/wget.py
+M scripts/lib/CIME/XML/batch.py
+M scripts/lib/CIME/XML/env_base.py
+M scripts/lib/CIME/XML/env_batch.py
+M scripts/lib/CIME/XML/env_mach_specific.py
+A scripts/lib/CIME/XML/env_workflow.py
+M scripts/lib/CIME/XML/generic_xml.py
+A scripts/lib/CIME/XML/workflow.py
+M scripts/lib/CIME/aprun.py
+M scripts/lib/CIME/case/case.py
+M scripts/lib/CIME/case/case_clone.py
+M scripts/lib/CIME/case/case_run.py
+M scripts/lib/CIME/case/case_setup.py
+M scripts/lib/CIME/case/case_st_archive.py
+M scripts/lib/CIME/case/case_submit.py
+M scripts/lib/CIME/get_timing.py
+M scripts/tests/scripts_regression_tests.py
+M src/build_scripts/buildlib.csm_share
+M src/components/data_comps/datm/cime_config/namelist_definition_datm.xml
+M src/components/data_comps/dice/nuopc/dice_comp_mod.F90
+M src/drivers/mct/cime_config/config_component.xml
+M src/drivers/mct/cime_config/config_component_cesm.xml
+M src/share/util/shr_file_mod.F90
+
+======================================================================
+
Originator: Chris Fischer
Date: 7-16-2019
Tag: cime5.8.6
diff --git a/cime/config/cesm/config_files.xml b/cime/config/cesm/config_files.xml
index 9ff04054c793..12464b5ef39d 100644
--- a/cime/config/cesm/config_files.xml
+++ b/cime/config/cesm/config_files.xml
@@ -51,6 +51,15 @@
$CIMEROOT/config/xml_schemas/config_batch.xsd
+
+ char
+ $CIMEROOT/config/$MODEL/machines/config_workflow.xml
+ case_last
+ env_case.xml
+ file containing workflow (for documentation only - DO NOT EDIT)
+ $CIMEROOT/config/xml_schemas/config_workflow.xsd
+
+
char
$CIMEROOT/config/$MODEL/config_inputdata.xml
@@ -126,6 +135,7 @@
$SRCROOT/components/pop/
$SRCROOT/components/mom/
+ $SRCROOT/components/nemo/
$CIMEROOT/src/components/data_comps/docn
$CIMEROOT/src/components/stub_comps/socn
$CIMEROOT/src/components/xcpl_comps/xocn
@@ -251,6 +261,7 @@
$COMP_ROOT_DIR_ICE/cime_config/config_compsets.xml
$COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml
$COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml
+ $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml
case_last
env_case.xml
@@ -271,6 +282,7 @@
$COMP_ROOT_DIR_ICE/cime_config/config_pes.xml
$COMP_ROOT_DIR_OCN/cime_config/config_pes.xml
$COMP_ROOT_DIR_OCN/cime_config/config_pes.xml
+ $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml
case_last
env_case.xml
@@ -297,6 +309,7 @@
$COMP_ROOT_DIR_ICE/cime_config/config_archive.xml
$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml
$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml
+ $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml
$COMP_ROOT_DIR_ROF/cime_config/config_archive.xml
$COMP_ROOT_DIR_ROF/cime_config/config_archive.xml
@@ -314,6 +327,7 @@
$COMP_ROOT_DIR_ATM/cime_config/SystemTests
$COMP_ROOT_DIR_OCN/cime_config/SystemTests
$COMP_ROOT_DIR_OCN/cime_config/SystemTests
+ $COMP_ROOT_DIR_OCN/cime_config/SystemTests
$COMP_ROOT_DIR_ICE/cime_config/SystemTests
$COMP_ROOT_DIR_GLC/cime_config/SystemTests
$COMP_ROOT_DIR_ROF/cime_config/SystemTests
@@ -336,6 +350,7 @@
$COMP_ROOT_DIR_ICE/cime_config/testdefs/testlist_cice.xml
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_pop.xml
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_mom.xml
+ $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_nemo.xml
$COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_rtm.xml
$COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_mosart.xml
@@ -359,6 +374,7 @@
$COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs
+ $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs
case_last
env_case.xml
@@ -379,6 +395,7 @@
$COMP_ROOT_DIR_ROF/cime_config/usermods_dirs
$COMP_ROOT_DIR_OCN/cime_config/usermods_dirs
$COMP_ROOT_DIR_OCN/cime_config/usermods_dirs
+ $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs
case_last
env_case.xml
@@ -406,6 +423,7 @@
$COMP_ROOT_DIR_LND/bld/namelist_files/namelist_definition_ctsm.xml
$COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_pop.xml
$COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_mom.xml
+ $COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_nemo.xml
-->
case_last
diff --git a/cime/config/cesm/config_grids.xml b/cime/config/cesm/config_grids.xml
index 4b34790fc0a3..4552304301ba 100644
--- a/cime/config/cesm/config_grids.xml
+++ b/cime/config/cesm/config_grids.xml
@@ -246,6 +246,20 @@
tx1v1
+
+ T62
+ T62
+ tn1v3
+ tn1v3
+
+
+
+ T62
+ T62
+ tn0.25v3
+ tn0.25v3
+
+
T62
T62
@@ -343,6 +357,20 @@
gx1v7
+
+ 0.23x0.31
+ 0.23x0.31
+ tn1v3
+ tn1v3
+
+
+
+ 0.23x0.31
+ 0.23x0.31
+ tn0.25v3
+ tn0.25v3
+
+
0.23x0.31
0.23x0.31
@@ -385,6 +413,20 @@
gx1v7
+
+ 0.9x1.25
+ 0.9x1.25
+ tn1v3
+ tn1v3
+
+
+
+ 0.9x1.25
+ 0.9x1.25
+ tn0.25v3
+ tn0.25v3
+
+
0.9x1.25
0.9x1.25
@@ -1163,6 +1205,10 @@
1152 768
domain.lnd.fv0.23x0.31_gx1v6.100517.nc
domain.ocn.0.23x0.31_gx1v6_101108.nc
+ domain.lnd.fv0.23x0.31_tn1v3.160414.nc
+ domain.ocn.fv0.23x0.31_tn1v3.160414.nc
+ domain.lnd.fv0.23x0.31_tn0.25v3.160721.nc
+ domain.ocn.fv0.23x0.31_tn0.25v3.160721.nc
0.23x0.31 is FV 1/4-deg grid:
@@ -1184,6 +1230,12 @@
domain.ocn.fv0.9x1.25_gx1v7.151020.nc
domain.lnd.fv0.9x1.25_tx0.66v1.190314.nc
domain.ocn.fv0.9x1.25_tx0.66v1.190314.nc
+ domain.lnd.fv0.9x1.25_tn1v3.160414.nc
+ domain.ocn.fv0.9x1.25_tn1v3.160414.nc
+ domain.lnd.fv0.9x1.25_tn0.25v3.160721.nc
+ domain.ocn.fv0.9x1.25_tn0.25v3.160721.nc
+ /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc
+ /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc
$DIN_LOC_ROOT/share/meshes/fv0.9x1.25_141008_polemod_ESMFmesh.nc
0.9x1.25 is FV 1-deg grid:
@@ -1259,6 +1311,8 @@
$DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx1v7.151008.nc
$DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx3v7.130409.nc
$DIN_LOC_ROOT/share/domains/domain.ocn.T62_tx0.66v1.190425.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tn1v3.160414.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tn0.25v3.160721.nc
$DIN_LOC_ROOT/share/meshes/T62_040121_ESMFmesh.nc
T62 is Gaussian grid:
@@ -1383,8 +1437,8 @@
777602 1
$DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v6.110502.nc
$DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v6.121113.nc
- $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v7.190502.nc
- $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v7.190502.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v7.190718.nc
+ $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v7.190718.nc
ne120np4 is Spectral Elem 1/4-deg grid:
@@ -1399,8 +1453,8 @@
777600 1
- $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_gx1v7.190503.nc
- $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_gx1v7.190503.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_gx1v7.190718.nc
+ $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_gx1v7.190718.nc
ne120np4.pg3 is a Spectral Elem 0.25-deg grid with a 3x3 FVM physics grid:
EXPERIMENTAL FVM physics grid
@@ -1542,6 +1596,20 @@
+
+ 360 291
+ $DIN_LOC_ROOT/share/domains/domain.ocn.tn1v3.160414.nc
+ tn1v3 is NEMO ORCA1 tripole grid at 1 deg (reduced eORCA):
+ NEMO ORCA1 tripole ocean grid
+
+
+
+ 1440 1050
+ $DIN_LOC_ROOT/share/domains/domain.ocn.tn0.25v3.160721.nc
+ tn0.25v3 is NEMO ORCA025 tripole grid at 1/4 deg (reduced eORCA):
+ NEMO ORCA025 tripole ocean grid
+
+
360 180
$DIN_LOC_ROOT/share/meshes/rx1_nomask_181022_ESMFmesh.nc
diff --git a/cime/config/cesm/config_grids_common.xml b/cime/config/cesm/config_grids_common.xml
index ee3ed1d79492..9dd8f9efbb3a 100644
--- a/cime/config/cesm/config_grids_common.xml
+++ b/cime/config/cesm/config_grids_common.xml
@@ -97,8 +97,8 @@
-
-
+
+
diff --git a/cime/config/cesm/config_grids_mct.xml b/cime/config/cesm/config_grids_mct.xml
index 8ef172014683..cd4a76cee869 100644
--- a/cime/config/cesm/config_grids_mct.xml
+++ b/cime/config/cesm/config_grids_mct.xml
@@ -175,21 +175,21 @@
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
@@ -236,11 +236,11 @@
-
-
-
-
-
+
+
+
+
+
diff --git a/cime/config/cesm/machines/config_batch.xml b/cime/config/cesm/machines/config_batch.xml
index 028eea73b051..eb099adc07b2 100644
--- a/cime/config/cesm/machines/config_batch.xml
+++ b/cime/config/cesm/machines/config_batch.xml
@@ -1,5 +1,5 @@
-
+
-j oe
@@ -168,6 +169,16 @@
+
+
+ -l nodes={{ num_nodes }}
+ -q {{ queue }}
+
+
+ iccp
+
+
+
@@ -225,10 +236,10 @@
regular
- regular
- premium
- share
- economy
+ regular
+ premium
+ share
+ economy
@@ -591,25 +602,4 @@
default
-
-
-
-
- template.case.run
- $BUILD_COMPLETE and not $TEST
-
-
- template.case.test
- $BUILD_COMPLETE and $TEST
-
-
- template.st_archive
- 1
- 0:20:00
-
- case.run or case.test
- $DOUT_S
-
-
-
diff --git a/cime/config/cesm/machines/config_compilers.xml b/cime/config/cesm/machines/config_compilers.xml
index 054616943239..aa386bc035fd 100644
--- a/cime/config/cesm/machines/config_compilers.xml
+++ b/cime/config/cesm/machines/config_compilers.xml
@@ -80,7 +80,7 @@ using a fortran linker.
$(FC_AUTO_R8)
- $(FC_AUTO_R8)
+ $(FC_AUTO_R8) -Duse_LARGEFILE
FALSE
@@ -503,6 +503,65 @@ using a fortran linker.
$ENV{TRILINOS_PATH}
+
+
+ --host=Linux
+
+
+ -xCORE-AVX2
+
+
+ -xCORE-AVX2
+
+
+ -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf
+
+
+ -DHAVE_PAPI -DHAVE_SLASHPROC
+
+
+ -mkl
+
+
+
+
+
+
+ -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY
+
+ $ENV{NETCDF}
+ gpfs
+ $ENV{PNETCDF}
+ xiar
+
+ cru
+
+
+
+
+
+ -xHost
+
+
+ -DINTEL_MKL -DHAVE_SSE2
+
+
+ -xHost
+
+
+ $(FC_AUTO_R8) -O3 -assume norealloc_lhs
+
+
+ $SHELL{${NETCDF_PATH}/bin/nc-config --flibs}
+
+ mpiicpc
+ mpiicc
+ mpiifort
+ icc
+ ifort
+ $ENV{TRILINOS_PATH}
+
+
-DHAVE_PAPI
diff --git a/cime/config/cesm/machines/config_machines.xml b/cime/config/cesm/machines/config_machines.xml
index 820fba18c6d7..567a7d056789 100644
--- a/cime/config/cesm/machines/config_machines.xml
+++ b/cime/config/cesm/machines/config_machines.xml
@@ -49,6 +49,72 @@ This allows using a different mpirun command to launch unit tests
-->
+
+ XC50 SkyLake, os is CNL, 40 pes/node, batch system is PBSPro
+ .*eth\d
+ CNL
+ intel,gnu,cray
+ mpt,mpi-serial
+ $ENV{BASEDIR}
+ $ENV{DIN_LOC_ROOT}
+ $DIN_LOC_ROOT
+ ${CIME_OUTPUT_ROOT}/archive/$CASE
+ ${CIME_OUTPUT_ROOT}/cesm_baselines
+ /home/jedwards/cesm_tools/cprnc/cprnc
+ 8
+ pbs
+ @ pusan.ac.kr
+ 40
+ 40
+
+ aprun
+
+ -j {{ hyperthreading }}
+ -n {{ total_tasks }}
+ -N $MAX_MPITASKS_PER_NODE
+ -S {{ tasks_per_numa }}
+ -d $ENV{OMP_NUM_THREADS}
+ --mpmd-env OMP_NUM_THREADS=$OMP_NUM_THREADS
+
+
+
+ /opt/modules/default/init/perl.pm
+ /opt/modules/default/init/python.py
+ /opt/modules/default/init/sh
+ /opt/modules/default/init/csh
+ /opt/modules/default/bin/modulecmd perl
+ /opt/modules/default/bin/modulecmd python
+ module
+ module
+
+ craype-x86-skylake
+ PrgEnv-pgi
+ PrgEnv-intel
+ PrgEnv-cray
+ PrgEnv-gnu
+ cray-netcdf
+ cray-hdf5
+ cray-parallel-netcdf
+ papi
+
+
+ PrgEnv-intel
+ craype-x86-skylake
+ craype-hugepages2M
+ perftools-base/7.0.4
+ cray-netcdf/4.6.1.3
+ cray-hdf5/1.10.2.0
+ cray-parallel-netcdf/1.8.1.3
+ papi/5.6.0.4
+ gridftp/6.0
+ cray-python/3.6.5.1
+
+
+
+ 256M
+ /home/jedwards/workflow/CESM_postprocessing
+
+
CMCC IBM iDataPlex, os is Linux, 16 pes/node, batch system is LSFd mpich
@@ -131,6 +197,7 @@ This allows using a different mpirun command to launch unit tests
1
on
on
+ /users/home/models/nemo/xios-cmip6/intel_xe_2013
@@ -260,7 +327,7 @@ This allows using a different mpirun command to launch unit tests
-np {{ total_tasks }}
-
+
/usr/share/Modules/init/perl.pm
/usr/share/Modules/init/python.py
/usr/share/Modules/init/csh
@@ -274,6 +341,8 @@ This allows using a different mpirun command to launch unit tests
compiler/gnu/8.2.0
+ mpi/3.3/gcc-8.2.0
+ tool/netcdf/4.6.1/gcc-8.1.0
@@ -647,7 +716,7 @@ This allows using a different mpirun command to launch unit tests
PrgEnv-intel
- intel intel/18.0.2.199
+ intel intel/19.0.3.199
/global/project/projectdirs/ccsm1/modulefiles/cori
@@ -674,16 +743,16 @@ This allows using a different mpirun command to launch unit tests
cray-libsci/18.03.1
- cray-mpich/7.7.0
+ cray-mpich/7.7.6
- cray-hdf5/1.10.1.1
- cray-netcdf/4.4.1.1.6
+ cray-hdf5/1.10.2.0
+ cray-netcdf/4.6.1.3
- cray-netcdf-hdf5parallel/4.4.1.1.6
- cray-hdf5-parallel/1.10.1.1
- cray-parallel-netcdf/1.8.1.3
+ cray-netcdf-hdf5parallel/4.6.1.3
+ cray-hdf5-parallel/1.10.2.0
+ cray-parallel-netcdf/1.8.1.4
cmake/3.8.2
@@ -750,13 +819,17 @@ This allows using a different mpirun command to launch unit tests
cray-hdf5
cray-netcdf-hdf5parallel
+
PrgEnv-intel
- intel intel/18.0.1.163
+ intel intel/19.0.3.199
/global/project/projectdirs/ccsm1/modulefiles/cori
- esmf/7.1.0r-defio-intel18.0.1.163-mpi-O-cori-knl
+ esmf/7.1.0r-defio-intel18.0.1.163-mpi-O-cori-knl
+
+
+ esmf/7.1.0r-netcdf-intel18.0.1.163-mpiuni-O-knl
@@ -769,26 +842,23 @@ This allows using a different mpirun command to launch unit tests
cray-memkind
- craype-mic-knl
craype craype/2.5.15
+ craype-mic-knl
cray-libsci/18.03.1
- cray-mpich/7.7.0
+ cray-mpich/7.7.6
- cray-hdf5/1.10.1.1
- cray-netcdf/4.4.1.1.6
+ cray-hdf5/1.10.2.0
+ cray-netcdf/4.6.1.3
- cray-netcdf-hdf5parallel/4.4.1.1.6
- cray-hdf5-parallel/1.10.1.1
- cray-parallel-netcdf/1.8.1.3
-
-
- cmake/3.8.2
+ cray-netcdf-hdf5parallel/4.6.1.3
+ cray-hdf5-parallel/1.10.2.0
+ cray-parallel-netcdf/1.8.1.4
diff --git a/cime/config/cesm/machines/config_workflow.xml b/cime/config/cesm/machines/config_workflow.xml
new file mode 100644
index 000000000000..f4bcf29e49bd
--- /dev/null
+++ b/cime/config/cesm/machines/config_workflow.xml
@@ -0,0 +1,138 @@
+
+
+
+
+
+
+ template.case.run
+ $BUILD_COMPLETE and not $TEST
+
+
+ template.case.test
+ $BUILD_COMPLETE and $TEST
+
+
+ template.st_archive
+
+ case.run or case.test
+ $DOUT_S
+
+ 1
+ 1
+ 0:20:00
+
+
+
+
+
+
+ $ENV{POSTPROCESS_PATH}/timeseries/template.timeseries
+ case.st_archive
+ $TIMESERIES
+
+ 72
+ 9
+ 0:20:00
+
+
+ 72
+ 9
+ 0:20:00
+
+
+
+
+
+ $ENV{POSTPROCESS_PATH}/timeseries/template.timeseries_transfer
+ timeseries
+ 1
+
+ 1
+ 1
+ 1:00:00
+
+
+
+
+
+ $CASEROOT/postprocess/xconform
+ timeseries
+ $CASEROOT/postprocess/pp_config -value --get STANDARDIZE_TIMESERIES
+
+
+
+ $CASEROOT/postprocess/atm_averages
+ timeseries or case.st_archive
+ $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM
+
+
+
+ $CASEROOT/postprocess/lnd_averages
+ timeseries or case.st_archive
+ $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM
+
+
+
+ $CASEROOT/postprocess/ice_averages
+ timeseries or case.st_archive
+ $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM
+
+
+
+
+ $CASEROOT/postprocess/ocn_averages
+ timeseries or case.st_archive
+ $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM
+
+
+
+ $CASEROOT/postprocess/atm_diagnostics
+ atm_averages
+ $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM
+
+
+
+ $CASEROOT/postprocess/lnd_diagnostics
+ lnd_averages
+ $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM
+
+
+ $CASEROOT/postprocess/ice_diagnostics
+ ice_averages
+ $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM
+
+
+ $CASEROOT/postprocess/ocn_diagnostics
+ ocn_averages
+ $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM
+
+
+
+
diff --git a/cime/config/cesm/machines/cylc_suite.rc.template b/cime/config/cesm/machines/cylc_suite.rc.template
new file mode 100644
index 000000000000..5511f6ab2e3b
--- /dev/null
+++ b/cime/config/cesm/machines/cylc_suite.rc.template
@@ -0,0 +1,24 @@
+[meta]
+ title = CESM CYLC workflow for {{ workflow_description }}
+[cylc]
+ [[parameters]]
+ member = {{ members }}
+
+[scheduling]
+ cycling mode = integer
+ initial cycle point = 1
+ final cycle point = {{ cycles }}
+
+ [[dependencies]]
+ [[[R1]]]
+ graph = "set_external_workflow => run => st_archive "
+ [[[R/P1]]] # Integer Cycling
+ graph = """
+ st_archive[-P1] => run
+ run => st_archive
+ """
+[runtime]
+ [[set_external_workflow]]
+ script = cd {{ case_path_string }} ./xmlchange EXTERNAL_WORKFLOW=TRUE
+ [[st_archive]]
+ script = cd {{ case_path_string }} ./case.submit --job case.st_archive; ./xmlchange CONTINUE_RUN=TRUE
diff --git a/cime/config/config_headers.xml b/cime/config/config_headers.xml
index e181a6d0d851..c0d939cf3a44 100644
--- a/cime/config/config_headers.xml
+++ b/cime/config/config_headers.xml
@@ -10,6 +10,13 @@
+
+
+ These variables may be changed at any time during a run; they
+ control the jobs that will be submitted and their dependencies.
+
+
+
These variables CANNOT BE CHANGED once a case has been created.
diff --git a/cime/config/e3sm/config_files.xml b/cime/config/e3sm/config_files.xml
index 738514de3305..1f8b4e1998b1 100644
--- a/cime/config/e3sm/config_files.xml
+++ b/cime/config/e3sm/config_files.xml
@@ -33,6 +33,15 @@
$CIMEROOT/config/xml_schemas/config_batch.xsd
+
+ char
+ $CIMEROOT/config/$MODEL/machines/config_workflow.xml
+ case_last
+ env_case.xml
+ file containing workflow (for documentation only - DO NOT EDIT)
+ $CIMEROOT/config/xml_schemas/config_workflow.xsd
+
+
char
$CIMEROOT/config/$MODEL/config_inputdata.xml
diff --git a/cime/config/e3sm/machines/config_batch.xml b/cime/config/e3sm/machines/config_batch.xml
index 72b7c4ba5346..a97163dcf342 100644
--- a/cime/config/e3sm/machines/config_batch.xml
+++ b/cime/config/e3sm/machines/config_batch.xml
@@ -569,29 +569,4 @@
-
-
-
- template.case.run
- $BUILD_COMPLETE and not $TEST
-
-
- template.case.run.sh
- False
-
-
- template.case.test
- $BUILD_COMPLETE and $TEST
-
-
- template.st_archive
- 1
- 0:20:00
-
- case.run case.test
- $DOUT_S
-
-
-
-
diff --git a/cime/config/e3sm/machines/config_workflow.xml b/cime/config/e3sm/machines/config_workflow.xml
new file mode 100644
index 000000000000..7fcdcec80835
--- /dev/null
+++ b/cime/config/e3sm/machines/config_workflow.xml
@@ -0,0 +1,51 @@
+
+
+
+
+
+
+ template.case.run
+ $BUILD_COMPLETE and not $TEST
+
+
+ template.case.test
+ $BUILD_COMPLETE and $TEST
+
+
+ template.st_archive
+
+ case.run or case.test
+ $DOUT_S
+
+ 1
+ 0:20:00
+
+
+
+
diff --git a/cime/config/xml_schemas/config_batch.xsd b/cime/config/xml_schemas/config_batch.xsd
index 66ee5cf3ee3c..141bcf8e57fd 100644
--- a/cime/config/xml_schemas/config_batch.xsd
+++ b/cime/config/xml_schemas/config_batch.xsd
@@ -24,7 +24,7 @@
-
+
@@ -37,7 +37,7 @@
-
+
diff --git a/cime/config/xml_schemas/config_workflow.xsd b/cime/config/xml_schemas/config_workflow.xsd
new file mode 100644
index 000000000000..805a70cf7bf7
--- /dev/null
+++ b/cime/config/xml_schemas/config_workflow.xsd
@@ -0,0 +1,68 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cime/scripts/Tools/archive_metadata b/cime/scripts/Tools/archive_metadata
index d0846920e2b4..472a6187fa56 100755
--- a/cime/scripts/Tools/archive_metadata
+++ b/cime/scripts/Tools/archive_metadata
@@ -791,7 +791,7 @@ def get_trunk_tag(case_dict, username, password):
if result:
last_tag = [i for i in result.split('\n') if i][-1]
last_tag = last_tag[:-1].split('_')[-1]
- tag = int(last_tag.strip('0'))
+ tag = int(last_tag.lstrip('0'))
return tag
diff --git a/cime/scripts/Tools/generate_cylc_workflow.py b/cime/scripts/Tools/generate_cylc_workflow.py
new file mode 100755
index 000000000000..ed7fb092f952
--- /dev/null
+++ b/cime/scripts/Tools/generate_cylc_workflow.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+
+"""
+Generates a cylc workflow file for the case. See https://cylc.github.io for details about cylc
+"""
+
+from standard_script_setup import *
+
+from CIME.case import Case
+from CIME.utils import expect, transform_vars
+
+import argparse, re
+logger = logging.getLogger(__name__)
+
+###############################################################################
+def parse_command_line(args, description):
+###############################################################################
+ parser = argparse.ArgumentParser(
+ description=description,
+ formatter_class=argparse.RawTextHelpFormatter)
+
+ CIME.utils.setup_standard_logging_options(parser)
+
+ parser.add_argument("caseroot", nargs="?", default=os.getcwd(),
+ help="Case directory for which namelists are generated.\n"
+ "Default is current directory.")
+
+ parser.add_argument('--cycles', default=1,
+ help="The number of cycles to run, default is RESUBMIT")
+
+ parser.add_argument("--ensemble", default=1,
+ help="generate suite.rc for an ensemble of cases, the case name argument must end in an integer.\n"
+ "for example: ./generate_cylc_workflow.py --ensemble 4 \n"
+ "will generate a workflow file in the current case, if that case is named case.01,"
+ "the workflow will include case.01, case.02, case.03 and case.04")
+
+ args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)
+
+ return args.caseroot, args.cycles, int(args.ensemble)
+
+def cylc_get_ensemble_first_and_last(case, ensemble):
+ if ensemble == 1:
+ return 1,None
+ casename = case.get_value("CASE")
+ m = re.search(r"(.*[^\d])(\d+)$", casename)
+ minval = int(m.group(2))
+ maxval = minval+ensemble-1
+ return minval,maxval
+
+def cylc_get_case_path_string(case, ensemble):
+ caseroot = case.get_value("CASEROOT")
+ casename = case.get_value("CASE")
+ if ensemble == 1:
+ return "{};".format(caseroot)
+ basepath = os.path.abspath(caseroot+"/..")
+ m = re.search(r"(.*[^\d])(\d+)$", casename)
+
+ expect(m, "casename {} must end in an integer for ensemble method".format(casename))
+
+ return "{basepath}/{basename}$(printf \"%0{intlen}d\"".format(basepath=basepath, basename=m.group(1), intlen=len(m.group(2))) + " ${CYLC_TASK_PARAM_member});"
+
+
+def cylc_batch_job_template(job, jobname, case, ensemble):
+
+ env_batch = case.get_env("batch")
+ batch_system_type = env_batch.get_batch_system_type()
+ batchsubmit = env_batch.get_value("batch_submit")
+ submit_args = env_batch.get_submit_args(case, job)
+ case_path_string = cylc_get_case_path_string(case, ensemble)
+
+ return """
+ [[{jobname}]]
+ script = cd {case_path_string} ./case.submit --job {job}
+ [[[job]]]
+ batch system = {batch_system_type}
+ batch submit command template = {batchsubmit} {submit_args} '%(job)s'
+ [[[directives]]]
+""".format(jobname=jobname, job=job, case_path_string=case_path_string, batch_system_type=batch_system_type,
+ batchsubmit=batchsubmit, submit_args=submit_args) + "{{ batchdirectives }}\n"
+
+
+def cylc_script_job_template(job, case, ensemble):
+ case_path_string = cylc_get_case_path_string(case, ensemble)
+ return """
+ [[{job}]]
+ script = cd {case_path_string} ./case.submit --job {job}
+""".format(job=job, case_path_string=case_path_string)
+
+###############################################################################
+def _main_func(description):
+###############################################################################
+ caseroot, cycles, ensemble = parse_command_line(sys.argv, description)
+
+ expect(os.path.isfile(os.path.join(caseroot, "CaseStatus")),
+ "case.setup must be run prior to running {}".format(__file__))
+ with Case(caseroot, read_only=True) as case:
+ if cycles == 1:
+ cycles = max(1, case.get_value('RESUBMIT'))
+ env_batch = case.get_env('batch')
+ env_workflow = case.get_env('workflow')
+ jobs = env_workflow.get_jobs()
+ casename = case.get_value('CASE')
+ input_template = os.path.join(case.get_value("MACHDIR"),"cylc_suite.rc.template")
+
+ overrides = {"cycles":cycles,
+ 'casename':casename}
+ input_text = open(input_template).read()
+
+ first,last = cylc_get_ensemble_first_and_last(case, ensemble)
+ if ensemble == 1:
+ overrides.update({'members':"{}".format(first)})
+ overrides.update({"workflow_description":"case {}".format(case.get_value("CASE"))})
+ else:
+ overrides.update({'members':"{}..{}".format(first,last)})
+ firstcase = case.get_value("CASE")
+ intlen = len(str(last))
+ lastcase = firstcase[:-intlen]+str(last)
+ overrides.update({"workflow_description":"ensemble from {} to {}".format(firstcase,lastcase)})
+ overrides.update({"case_path_string":cylc_get_case_path_string(case, ensemble)})
+
+ for job in jobs:
+ jobname = job
+ if job == 'case.st_archive':
+ continue
+ if job == 'case.run':
+ jobname = 'run'
+ overrides.update(env_batch.get_job_overrides(job, case))
+ overrides.update({'job_id':'run.'+casename})
+ input_text = input_text + cylc_batch_job_template(job, jobname, case, ensemble)
+ else:
+ depends_on = env_workflow.get_value('dependency', subgroup=job)
+ if depends_on.startswith('case.'):
+ depends_on = depends_on[5:]
+ input_text = input_text.replace(' => '+depends_on,' => '+depends_on+' => '+job)
+
+
+ overrides.update(env_batch.get_job_overrides(job, case))
+ overrides.update({'job_id':job+'.'+casename})
+ if 'total_tasks' in overrides and overrides['total_tasks'] > 1:
+ input_text = input_text + cylc_batch_job_template(job, jobname, case, ensemble)
+ else:
+ input_text = input_text + cylc_script_job_template(jobname, case, ensemble)
+
+
+ overrides.update({'batchdirectives':env_batch.get_batch_directives(case,job, overrides=overrides,
+ output_format='cylc')})
+ # we need to re-transform for each job to get job size correctly
+ input_text = transform_vars(input_text, case=case, subgroup=job, overrides=overrides)
+
+ with open("suite.rc", "w") as f:
+ f.write(case.get_resolved_value(input_text))
+
+
+
+if (__name__ == "__main__"):
+ _main_func(__doc__)
diff --git a/cime/scripts/Tools/mkDepends b/cime/scripts/Tools/mkDepends
index d9bcbdd87b8a..cd7542732b00 100755
--- a/cime/scripts/Tools/mkDepends
+++ b/cime/scripts/Tools/mkDepends
@@ -71,29 +71,86 @@
# CESM Software Engineering Group, NCAR
# Mar 2013
# -----------------------------------------------------------------------------
+# Modifications to Santos' version needed by the NEMO ocean model:
+#
+# - Handling of module use statements activated by CPP macros, i.e.:
+#
+# #ifdef KEY
+# USE mod1
+# #else
+# USE mod2
+# #endif
+#
+# - Handling of Fortran code inclusion through CPP "#include"
+# (NEMO's *.h90 files)
+#
+# When preprocessing is required (-p option) fortran files *.F and *.F90 are
+# preprocessed before searching for module dependencies. Preprocessed files are
+# saved in a temp subdir of the current dir which is removed at the end of execution.
+# The default preprocessor command is 'cpp'.
+# User can override it setting the env variable CPP before execution.
+# CPP macros can be defined (-D option) or undefined (-U option).
+# CPP search path can be defined (-I option).
+# Defined/undefined macros and search path can be passed in the env variable
+# CPPFLAGS too.
+#
+# Pier Giuseppe Fogli
+# ANS, CMCC
+# Jun 2013
+# -----------------------------------------------------------------------------
-
-use Getopt::Std;
+use Getopt::Long qw(:config bundling);
use File::Basename;
+use File::Temp qw/ :POSIX /;
# Check for usage request.
@ARGV >= 2 or usage();
# Process command line.
-my %opt = ();
-getopts( "t:wd:m:", \%opt ) or usage();
+my $opt_w = 0;
+my $obj_dir;
+my $additional_file;
+my $do_cpp = 0;
+my $mangle_scheme = "lower";
+my @cpp_def_key;
+my @cpp_undef_key;
+my @cpp_inc_path;
+GetOptions('p' => \$do_cpp,
+ 'w' => \$opt_w,
+ 'D=s' => \@cpp_def_key,
+ 'U=s' => \@cpp_undef_key,
+ 'I=s' => \@cpp_inc_path,
+ 't=s' => \$obj_dir,
+ 'd=s' => \$additional_file,
+ 'm=s' => \$mangle_scheme,
+) or usage();
+@ARGV == 2 or usage(); # Check that only 2 files remain
my $filepath_arg = shift() or usage();
my $srcfile_arg = shift() or usage();
@ARGV == 0 or usage(); # Check that all args were processed.
-my $obj_dir = "";
-if ( defined $opt{'t'} ) { $obj_dir = $opt{'t'}."/"; }
-
-my $additional_file = "";
-if ( defined $opt{'d'} ) { $additional_file = $opt{'d'}; }
-
-my $mangle_scheme = "lower";
-if ( defined $opt{'m'} ) { $mangle_scheme = $opt{'m'}; }
+# Setup CPP stuff if needed
+my $cpp = "cpp";
+my $red = " ";
+if ($do_cpp){
+ my @cpp_keys = ();
+ my @cpp_path = ();
+ #
+ # Override default cpp from env
+ $ENV{"CPP"} and $cpp = $ENV{"CPP"} ;
+ $ENV{"CPPFLAGS"} and @$cpp_keys = $ENV{"CPPFLAGS"} ;
+ #
+ foreach $k (@cpp_def_key){
+ push @$cpp_keys, "-D".$k ;
+ }
+ foreach $k (@cpp_undef_key){
+ push @$cpp_keys, "-U".$k ;
+ }
+ foreach $k (@cpp_inc_path){
+ push @$cpp_path, "-I".$k ;
+ }
+ if ($cpp =~ /gcc/){ $red = ">"; }
+}
open(FILEPATH, $filepath_arg) or die "Can't open $filepath_arg: $!\n";
open(SRCFILES, $srcfile_arg) or die "Can't open $srcfile_arg: $!\n";
@@ -106,7 +163,7 @@ close(FILEPATH);
chomp @file_paths;
unshift(@file_paths,'.');
foreach $dir (@file_paths) { # (could check that directories exist here)
- $dir =~ s!/?\s*$!!; # remove / and any whitespace at end of directory name
+ $dir =~ s!/?\s*$!!; #!# remove / and any whitespace at end of directory name
($dir) = glob $dir; # Expand tildes in path names.
}
@@ -120,7 +177,8 @@ my %module_files = ();
# Attempt to parse each file for /^\s*module/ and extract module names
# for each file.
my ($f, $name, $path, $suffix, $mod);
-my @suffixes = ('\.[fF]90', '\.[fF]','\.F90\.in' );
+# include NEMO's *.h90 files
+my @suffixes = ('\.[fFh]90', '\.[fF]','\.F90\.in' );
foreach $f (@src) {
($name, $path, $suffix) = fileparse($f, @suffixes);
# find the file in the list of directorys (in @file_paths)
@@ -169,14 +227,34 @@ my %file_modules = ();
my %file_includes = ();
my @check_includes = ();
my %modules_used = ();
+
+# Create a temp dir for preprocessed files if needed
+my $tmp_dir;
+my $tmp_file;
+if ($do_cpp){
+ my $tmp_nam = tmpnam();
+ $tmp_dir = basename($tmp_nam);
+ my $cmd = "mkdir " . $tmp_dir;
+ system($cmd) == 0 or die "Failed to run command $cmd !\n";
+}
+
foreach $f ( @src ) {
# Find the file in the seach path (@file_paths).
unless ($file_path = find_file($f)) {
- if (defined $opt{'w'}) {print STDERR "$f not found\n";}
+ if ($opt_w) {print STDERR "$f not found\n";}
next;
}
+ # Preprocess if required
+ ($name, $path, $suffix) = fileparse($f, @suffixes);
+ if ($do_cpp && $suffix =~ /\.F/){
+ $tmp_file = catfile($tmp_dir, $name . lc($suffix));
+ my $cmd = $cpp . " " . "@$cpp_path" . " " . "@$cpp_keys" . " " . $file_path . $red . $tmp_file ;
+ system($cmd) == 0 or die "Failed to run command $cmd !\n";
+ $file_path = $tmp_file;
+ }
+
# Find the module and include dependencies.
($rmods, $rincs) = find_dependencies( $file_path );
@@ -189,16 +267,22 @@ foreach $f ( @src ) {
push @check_includes, @{$file_includes{$f}};
}
-print STDERR "\%file_modules\n";
-while ( ($k,$v) = each %file_modules ) {
- print STDERR "$k => @$v\n";
+# Remove temp preprocessed file
+if ($do_cpp){
+ my $cmd = "rm -rf " . $tmp_dir;
+ system($cmd) == 0 or die "Failed to run command: $cmd !\n";
}
-print STDERR "\%file_includes\n";
-while ( ($k,$v) = each %file_includes ) {
- print STDERR "$k => @$v\n";
-}
-print STDERR "\@check_includes\n";
-print STDERR "@check_includes\n";
+
+#print STDERR "\%file_modules\n";
+#while ( ($k,$v) = each %file_modules ) {
+# print STDERR "$k => @$v\n";
+#}
+#print STDERR "\%file_includes\n";
+#while ( ($k,$v) = each %file_includes ) {
+# print STDERR "$k => @$v\n";
+#}
+#print STDERR "\@check_includes\n";
+#print STDERR "@check_includes\n";
# Find include file dependencies.
my %include_depends = ();
@@ -281,10 +365,10 @@ print "# Declare all module files used to build each object.\n";
foreach $f (sort keys %file_modules) {
my $file;
if($f =~ /\.F90\.in$/){
- $f =~ /(.+)\.F90\.in/;
+ $f =~ /(.+)\.F90\.in/;
$file = $1;
}else{
- $f =~ /(.+)\./;
+ $f =~ /(.+)\./;
$file = $1;
}
$target = $obj_dir."$file.o";
@@ -326,12 +410,12 @@ sub find_dependencies {
my @suffixes = ('\.[fF]90', '\.[fF]','\.F90\.in' );
($name, $path, $suffix) = fileparse($file, @suffixes);
$target = "$name.o";
- my $include;
+
while ( ) {
- # Search for "#include" and strip filename when found.
- if ( /^#include\s+[<"](.*)[>"]/ ) {
+ # Search for CPP include and strip filename when found.
+ if ( /^#\s*include\s+[<"](.*)[>"]/ ) {
$include = $1;
- }
+ }
# Search for Fortran include dependencies.
elsif ( /^\s*include\s+['"](.*)['"]/ ) { #" for emacs fontlock
$include = $1;
@@ -429,8 +513,22 @@ sub usage {
($ProgName = $0) =~ s!.*/!!; # name of program
die < 1:
+ m = re.search(r'(\d+)$', args.case)
+ expect(m, " case name must end in an integer to use this feature")
+ startval = m.group(1)
+
return args.case, args.clone, args.keepexe, args.mach_dir, args.project, \
- args.cime_output_root, args.user_mods_dir
+ args.cime_output_root, args.user_mods_dir, int(args.ensemble), startval
##############################################################################
def _main_func():
###############################################################################
- case, clone, keepexe, mach_dir, project, cime_output_root, user_mods_dir = parse_command_line(sys.argv)
+ case, clone, keepexe, mach_dir, project, cime_output_root, user_mods_dir, \
+ ensemble, startval = parse_command_line(sys.argv)
cloneroot = os.path.abspath(clone)
expect(os.path.isdir(cloneroot),
@@ -77,12 +89,16 @@ def _main_func():
if user_mods_dir is not None:
if os.path.isdir(user_mods_dir):
user_mods_dir = os.path.abspath(user_mods_dir)
-
- with Case(cloneroot, read_only=False) as clone:
- clone.create_clone(case, keepexe=keepexe, mach_dir=mach_dir,
- project=project,
- cime_output_root=cime_output_root,
- user_mods_dir=user_mods_dir)
+ nint = len(startval)
+
+ for i in range(int(startval), int(startval)+ensemble):
+ if ensemble > 1:
+ case = case[:-nint] + '{{0:0{0:d}d}}'.format(nint).format(i)
+ with Case(cloneroot, read_only=False) as clone:
+ clone.create_clone(case, keepexe=keepexe, mach_dir=mach_dir,
+ project=project,
+ cime_output_root=cime_output_root,
+ user_mods_dir=user_mods_dir)
###############################################################################
diff --git a/cime/scripts/create_newcase b/cime/scripts/create_newcase
index f517bbdedec5..183d9dae2b76 100755
--- a/cime/scripts/create_newcase
+++ b/cime/scripts/create_newcase
@@ -89,6 +89,14 @@ def parse_command_line(args, cimeroot, description):
help="Full pathname of config grid file to use. "
"\nThis should be a copy of config/config_grids.xml with the new user grid changes added to it. \n")
+ if cime_config and cime_config.has_option('main','workflow'):
+ workflow_default = cime_config.get('main', 'workflow')
+ else:
+ workflow_default = "default"
+
+ parser.add_argument("--workflow-case",default=workflow_default,
+ help="A workflow from config_workflow.xml to apply to this case. ")
+
if cime_config and cime_config.has_option('main','SRCROOT'):
srcroot_default = cime_config.get('main', 'srcroot')
@@ -172,7 +180,7 @@ def parse_command_line(args, cimeroot, description):
args.user_mods_dir, args.pesfile, \
args.gridfile, args.srcroot, args.test, args.multi_driver, \
args.ninst, args.walltime, args.queue, args.output_root, args.script_root, \
- run_unsupported, args.answer, args.input_dir, args.driver, args.non_local
+ run_unsupported, args.answer, args.input_dir, args.driver, args.workflow_case, args.non_local
###############################################################################
def _main_func(description):
@@ -184,7 +192,9 @@ def _main_func(description):
user_mods_dir, pesfile, \
gridfile, srcroot, test, multi_driver, ninst, walltime, \
queue, output_root, script_root, run_unsupported, \
- answer, input_dir, driver, non_local = parse_command_line(sys.argv, cimeroot, description)
+ answer, input_dir, driver, \
+ workflow, non_local = parse_command_line(sys.argv, cimeroot, description)
+
if script_root is None:
caseroot = os.path.abspath(casename)
@@ -209,7 +219,7 @@ def _main_func(description):
multi_driver=multi_driver, ninst=ninst, test=test,
walltime=walltime, queue=queue, output_root=output_root,
run_unsupported=run_unsupported, answer=answer,
- input_dir=input_dir, driver=driver, non_local=non_local)
+ input_dir=input_dir, driver=driver, workflow_case=workflow, non_local=non_local)
###############################################################################
diff --git a/cime/scripts/lib/CIME/Servers/wget.py b/cime/scripts/lib/CIME/Servers/wget.py
index dbf374ce1c7f..d96f0cd414fc 100644
--- a/cime/scripts/lib/CIME/Servers/wget.py
+++ b/cime/scripts/lib/CIME/Servers/wget.py
@@ -50,7 +50,7 @@ def getdirectory(self, rel_path, full_path):
logger.debug(output)
logger.debug(errput)
if (stat != 0):
- logging.warning("wget failed with output: {} and errput {}\n".format(output, errput))
+ logging.warning("wget failed with output: {} and errput {}\n".format(output.encode('utf-8'), errput.encode('utf-8')))
# wget puts an empty file if it fails.
try:
os.remove(full_path)
diff --git a/cime/scripts/lib/CIME/XML/batch.py b/cime/scripts/lib/CIME/XML/batch.py
index c0c577488ef7..b9e4385496dd 100644
--- a/cime/scripts/lib/CIME/XML/batch.py
+++ b/cime/scripts/lib/CIME/XML/batch.py
@@ -107,14 +107,15 @@ def get_batch_jobs(self):
and the second a dict of qualifiers for the job
"""
jobs = []
- bnode = self.get_child("batch_jobs")
- for jnode in self.get_children(root=bnode):
- if self.name(jnode) == "job":
- name = self.get(jnode, "name")
- jdict = {}
- for child in self.get_children(root=jnode):
- jdict[self.name(child)] = self.text(child)
-
- jobs.append((name, jdict))
+ bnode = self.get_optional_child("batch_jobs")
+ if bnode:
+ for jnode in self.get_children(root=bnode):
+ if self.name(jnode) == "job":
+ name = self.get(jnode, "name")
+ jdict = {}
+ for child in self.get_children(root=jnode):
+ jdict[self.name(child)] = self.text(child)
+
+ jobs.append((name, jdict))
return jobs
diff --git a/cime/scripts/lib/CIME/XML/env_base.py b/cime/scripts/lib/CIME/XML/env_base.py
index 0502403d2bf2..801a315a87bb 100644
--- a/cime/scripts/lib/CIME/XML/env_base.py
+++ b/cime/scripts/lib/CIME/XML/env_base.py
@@ -12,7 +12,7 @@ class EnvBase(EntryID):
def __init__(self, case_root, infile, schema=None, read_only=False):
if case_root is None:
case_root = os.getcwd()
-
+ self._caseroot = case_root
if os.path.isabs(infile):
fullpath = infile
else:
diff --git a/cime/scripts/lib/CIME/XML/env_batch.py b/cime/scripts/lib/CIME/XML/env_batch.py
index b78c1d35d8d7..acea3a269a82 100644
--- a/cime/scripts/lib/CIME/XML/env_batch.py
+++ b/cime/scripts/lib/CIME/XML/env_batch.py
@@ -4,7 +4,7 @@
from CIME.XML.standard_module_setup import *
from CIME.XML.env_base import EnvBase
-from CIME.utils import transform_vars, get_cime_root, convert_to_seconds, format_time, get_cime_config, get_batch_script_for_job, get_logging_options
+from CIME.utils import transform_vars, get_cime_root, convert_to_seconds, get_cime_config, get_batch_script_for_job, get_logging_options
from collections import OrderedDict
import stat, re, math
@@ -32,21 +32,6 @@ def set_value(self, item, value, subgroup=None, ignore_type=False):
"""
val = None
- if item == "JOB_WALLCLOCK_TIME":
- #Most systems use %H:%M:%S format for wallclock but LSF
- #uses %H:%M this code corrects the value passed in to be
- #the correct format - if we find we have more exceptions
- #than this we may need to generalize this further
- walltime_format = self.get_value("walltime_format", subgroup=None)
- if walltime_format is not None and walltime_format.count(":") != value.count(":"): # pylint: disable=maybe-no-member
- if value.count(":") == 1:
- t_spec = "%H:%M"
- elif value.count(":") == 2:
- t_spec = "%H:%M:%S"
- else:
- expect(False, "could not interpret format for wallclock time {}".format(value))
- value = format_time(walltime_format, t_spec, value)
-
if item == "JOB_QUEUE":
expect(value in self._get_all_queue_names() or ignore_type,
"Unknown Job Queue specified use --force to set")
@@ -69,34 +54,26 @@ def set_value(self, item, value, subgroup=None, ignore_type=False):
return val
# pylint: disable=arguments-differ
- def get_value(self, item, attribute=None, resolved=True, subgroup="PRIMARY"):
+ def get_value(self, item, attribute=None, resolved=True, subgroup=None):
"""
Must default subgroup to something in order to provide single return value
"""
value = None
- if subgroup is None:
- node = self.get_optional_child(item, attribute)
- if node is None:
- # this will take the last instance of item listed in all batch_system elements
- bs_nodes = self.get_children("batch_system")
- for bsnode in bs_nodes:
- cnode = self.get_optional_child(item, attribute, root=bsnode)
- if cnode is not None:
- node = cnode
-
- if node is not None:
- value = self.text(node)
- if resolved:
- value = self.get_resolved_value(value)
- else:
- value = super(EnvBatch, self).get_value(item,attribute,resolved)
-
+ node = self.get_optional_child(item, attribute)
+ if node is None:
+ # this will take the last instance of item listed in all batch_system elements
+ bs_nodes = self.get_children("batch_system")
+ for bsnode in bs_nodes:
+ cnode = self.get_optional_child(item, attribute, root=bsnode)
+ if cnode is not None:
+ node = cnode
+ if node is None or item in ("BATCH_SYSTEM", "PROJECT_REQUIRED"):
+ value = super(EnvBatch, self).get_value(item,attribute,resolved)
else:
- if subgroup == "PRIMARY":
- subgroup = "case.test" if "case.test" in self.get_jobs() else "case.run"
- #pylint: disable=assignment-from-none
- value = super(EnvBatch, self).get_value(item, attribute=attribute, resolved=resolved, subgroup=subgroup)
+ value = self.text(node)
+ if resolved:
+ value = self.get_resolved_value(value)
return value
@@ -186,25 +163,39 @@ def set_batch_system(self, batchobj, batch_system_type=None):
self.add_child(self.copy(batchobj.machine_node))
self.set_value("BATCH_SYSTEM", batch_system_type)
- def make_batch_script(self, input_template, job, case, outfile=None):
- expect(os.path.exists(input_template), "input file '{}' does not exist".format(input_template))
- task_count = self.get_value("task_count", subgroup=job)
+ def get_job_overrides(self, job, case):
+ env_workflow = case.get_env('workflow')
+ total_tasks, num_nodes, tasks_per_node, thread_count = env_workflow.get_job_specs(job)
overrides = {}
- if task_count is not None:
- overrides["total_tasks"] = int(task_count)
- overrides["num_nodes"] = int(math.ceil(float(task_count)/float(case.tasks_per_node)))
+
+ if total_tasks:
+ overrides["total_tasks"] = total_tasks
+ overrides["num_nodes"] = num_nodes
+ overrides["tasks_per_node"] = tasks_per_node
+ if thread_count:
+ overrides["thread_count"] = thread_count
else:
- task_count = case.get_value("TOTALPES")*int(case.thread_count)
- if int(task_count) < case.get_value("MAX_TASKS_PER_NODE"):
- overrides["max_tasks_per_node"] = int(task_count)
+ total_tasks = case.get_value("TOTALPES")*int(case.thread_count)
+ thread_count = case.thread_count
+ if int(total_tasks)*int(thread_count) < case.get_value("MAX_TASKS_PER_NODE"):
+ overrides["max_tasks_per_node"] = int(total_tasks)
+
+ overrides["mpirun"] = case.get_mpirun_cmd(job=job, overrides=overrides)
+ return overrides
- overrides["job_id"] = case.get_value("CASE") + os.path.splitext(job)[1]
+ def make_batch_script(self, input_template, job, case, outfile=None):
+ expect(os.path.exists(input_template), "input file '{}' does not exist".format(input_template))
+ overrides = self.get_job_overrides(job, case)
+ ext = os.path.splitext(job)[-1]
+ if len(ext) == 0:
+ ext = job
+ if ext.startswith('.'):
+ ext = ext[1:]
+ overrides["job_id"] = ext + '.' + case.get_value("CASE")
if "pleiades" in case.get_value("MACH"):
# pleiades jobname needs to be limited to 15 chars
overrides["job_id"] = overrides["job_id"][:15]
-
overrides["batchdirectives"] = self.get_batch_directives(case, job, overrides=overrides)
- overrides["mpirun"] = case.get_mpirun_cmd(job=job)
output_text = transform_vars(open(input_template,"r").read(), case=case, subgroup=job, overrides=overrides)
output_name = get_batch_script_for_job(job) if outfile is None else outfile
logger.info("Creating file {}".format(output_name))
@@ -220,8 +211,8 @@ def set_job_defaults(self, batch_jobs, case):
if self._batchtype == "none":
return
-
- known_jobs = self.get_jobs()
+ env_workflow = case.get_env('workflow')
+ known_jobs = env_workflow.get_jobs()
for job, jsect in batch_jobs:
if job not in known_jobs:
@@ -281,9 +272,8 @@ def set_job_defaults(self, batch_jobs, case):
walltime = specs[3]
walltime = self._default_walltime if walltime is None else walltime # last-chance fallback
-
- self.set_value("JOB_QUEUE", queue, subgroup=job, ignore_type=specs is None)
- self.set_value("JOB_WALLCLOCK_TIME", walltime, subgroup=job)
+ env_workflow.set_value("JOB_QUEUE", queue, subgroup=job, ignore_type=specs is None)
+ env_workflow.set_value("JOB_WALLCLOCK_TIME", walltime, subgroup=job)
logger.debug("Job {} queue {} walltime {}".format(job, queue, walltime))
def _match_attribs(self, attribs, case, queue):
@@ -317,7 +307,7 @@ def _match(self, my_value, xml_value):
logger.debug("(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result))
return result
- def get_batch_directives(self, case, job, overrides=None):
+ def get_batch_directives(self, case, job, overrides=None, output_format='default'):
"""
"""
result = []
@@ -335,8 +325,10 @@ def get_batch_directives(self, case, job, overrides=None):
for root in roots:
if root is not None:
if directive_prefix is None:
- directive_prefix = self.get_element_text("batch_directive", root=root)
-
+ if output_format == 'default':
+ directive_prefix = self.get_element_text("batch_directive", root=root)
+ elif output_format == 'cylc':
+ directive_prefix = " "
if unknown_queue:
unknown_queue_directives = self.get_element_text("unknown_queue_directives",
root=root)
@@ -351,6 +343,16 @@ def get_batch_directives(self, case, job, overrides=None):
if self._match_attribs(self.attrib(dnode), case, queue):
for node in nodes:
directive = self.get_resolved_value("" if self.text(node) is None else self.text(node))
+ if output_format == 'cylc':
+ if self._batchtype == 'pbs':
+ # cylc includes the -N itself, no need to add
+ if directive.startswith("-N"):
+ directive=''
+ continue
+ m = re.match(r'\s*(-[\w])', directive)
+ if m:
+ directive = re.sub(r'(-[\w]) ','{} = '.format(m.group(1)), directive)
+
default = self.get(node, "default")
if default is None:
directive = transform_vars(directive, case=case, subgroup=job, default=default, overrides=overrides)
@@ -427,34 +429,35 @@ def get_submit_args(self, case, job):
def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None, skip_pnl=False,
allow_fail=False, resubmit_immediate=False, mail_user=None, mail_type=None,
batch_args=None, dry_run=False):
- alljobs = self.get_jobs()
+ env_workflow = case.get_env('workflow')
+ external_workflow = case.get_value("EXTERNAL_WORKFLOW")
+ alljobs = env_workflow.get_jobs()
+ alljobs = [j for j in alljobs
+ if os.path.isfile(os.path.join(self._caseroot,get_batch_script_for_job(j)))]
startindex = 0
jobs = []
firstjob = job
if job is not None:
expect(job in alljobs, "Do not know about batch job {}".format(job))
startindex = alljobs.index(job)
-
for index, job in enumerate(alljobs):
logger.debug( "Index {:d} job {} startindex {:d}".format(index, job, startindex))
if index < startindex:
continue
try:
- prereq = self.get_value('prereq', subgroup=job, resolved=False)
- if prereq is None or job == firstjob or (dry_run and prereq == "$BUILD_COMPLETE"):
+ prereq = env_workflow.get_value('prereq', subgroup=job, resolved=False)
+ if external_workflow or prereq is None or job == firstjob or (dry_run and prereq == "$BUILD_COMPLETE"):
prereq = True
else:
prereq = case.get_resolved_value(prereq)
prereq = eval(prereq)
except Exception:
expect(False,"Unable to evaluate prereq expression '{}' for job '{}'".format(self.get_value('prereq',subgroup=job), job))
-
if prereq:
- jobs.append((job, self.get_value('dependency', subgroup=job)))
+ jobs.append((job, env_workflow.get_value('dependency', subgroup=job)))
if self._batchtype == "cobalt":
break
-
depid = OrderedDict()
jobcmds = []
@@ -467,7 +470,7 @@ def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None, skip_pnl
num_submit = 1
prev_job = None
-
+ batch_job_id = None
for _ in range(num_submit):
for job, dependency in jobs:
if dependency is not None:
@@ -497,10 +500,13 @@ def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None, skip_pnl
batch_job_id = str(alljobs.index(job)) if dry_run else result
depid[job] = batch_job_id
jobcmds.append( (job, result) )
- if self._batchtype == "cobalt":
+
+ if self._batchtype == "cobalt" or external_workflow:
break
- prev_job = batch_job_id
+ if not external_workflow and not no_batch:
+ expect(batch_job_id, "No result from jobs {}".format(jobs))
+ prev_job = batch_job_id
if dry_run:
return jobcmds
@@ -577,17 +583,23 @@ def _build_run_args_str(self, job, no_batch, **run_args):
def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False,
no_batch=False, skip_pnl=False, mail_user=None, mail_type=None,
batch_args=None, dry_run=False, resubmit_immediate=False):
+
if not dry_run:
logger.warning("Submit job {}".format(job))
batch_system = self.get_value("BATCH_SYSTEM", subgroup=None)
if batch_system is None or batch_system == "none" or no_batch:
logger.info("Starting job script {}".format(job))
function_name = job.replace(".", "_")
+ job_name = "."+job
if not dry_run:
args = self._build_run_args(job, True, skip_pnl=skip_pnl, set_continue_run=resubmit_immediate,
submit_resubmits=not resubmit_immediate)
try:
- getattr(case, function_name)(**{k: v for k, (v, _) in args.items()})
+ if hasattr(case, function_name):
+ getattr(case, function_name)(**{k: v for k, (v, _) in args.items()})
+ else:
+ expect(os.path.isfile(job_name),"Could not find file {}".format(job_name))
+ run_cmd_no_fail(os.path.join(self._caseroot,job_name), combine_output=True, verbose=True, from_dir=self._caseroot)
except Exception as e:
# We don't want exception from the run phases getting into submit phase
logger.warning("Exception from {}: {}".format(function_name, str(e)))
@@ -665,13 +677,14 @@ def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False,
batch_env_flag = self.get_value("batch_env", subgroup=None)
run_args = self._build_run_args_str(job, False, skip_pnl=skip_pnl, set_continue_run=resubmit_immediate,
submit_resubmits=not resubmit_immediate)
- if batch_env_flag:
+ if batch_system == 'lsf':
+ sequence = (run_args, batchsubmit, submitargs, batchredirect, get_batch_script_for_job(job))
+ elif batch_env_flag:
sequence = (batchsubmit, submitargs, run_args, batchredirect, get_batch_script_for_job(job))
else:
sequence = (batchsubmit, submitargs, batchredirect, get_batch_script_for_job(job), run_args)
submitcmd = " ".join(s.strip() for s in sequence if s is not None)
-
if dry_run:
return submitcmd
else:
@@ -807,8 +820,7 @@ def get_all_queues(self):
return nodes
def get_children(self, name=None, attributes=None, root=None):
- if name in ("JOB_WALLCLOCK_TIME", "PROJECT", "CHARGE_ACCOUNT",
- "PROJECT_REQUIRED", "JOB_QUEUE", "BATCH_COMMAND_FLAGS"):
+ if name == "PROJECT_REQUIRED":
nodes = super(EnvBatch, self).get_children("entry", attributes={"id":name}, root=root)
else:
nodes = super(EnvBatch, self).get_children(name, attributes=attributes, root=root)
@@ -883,8 +895,18 @@ def compare_xml(self, other):
def make_all_batch_files(self, case):
machdir = case.get_value("MACHDIR")
+ env_workflow = case.get_env("workflow")
logger.info("Creating batch scripts")
- for job in self.get_jobs():
- input_batch_script = os.path.join(machdir,self.get_value('template', subgroup=job))
- logger.info("Writing {} script from input template {}".format(job, input_batch_script))
- self.make_batch_script(input_batch_script, job, case)
+ jobs = env_workflow.get_jobs()
+ for job in jobs:
+ template = case.get_resolved_value(env_workflow.get_value('template', subgroup=job))
+
+ if os.path.isabs(template):
+ input_batch_script = template
+ else:
+ input_batch_script = os.path.join(machdir,template)
+ if os.path.isfile(input_batch_script):
+ logger.info("Writing {} script from input template {}".format(job, input_batch_script))
+ self.make_batch_script(input_batch_script, job, case)
+ else:
+ logger.warning("Input template file {} for job {} does not exist or cannot be read.".format(input_batch_script, job))
diff --git a/cime/scripts/lib/CIME/XML/env_mach_specific.py b/cime/scripts/lib/CIME/XML/env_mach_specific.py
index 1c5729c32e26..86a6bfc75dc0 100644
--- a/cime/scripts/lib/CIME/XML/env_mach_specific.py
+++ b/cime/scripts/lib/CIME/XML/env_mach_specific.py
@@ -406,7 +406,7 @@ def get_module_system_cmd_path(self, lang):
cmd_nodes = self.get_optional_child("cmd_path", attributes={"lang":lang}, root=self.get_child("module_system"))
return self.text(cmd_nodes) if cmd_nodes is not None else None
- def get_mpirun(self, case, attribs, job, exe_only=False):
+ def get_mpirun(self, case, attribs, job, exe_only=False, overrides=None):
"""
Find best match, return (executable, {arg_name : text})
"""
@@ -462,12 +462,12 @@ def get_mpirun(self, case, attribs, job, exe_only=False):
# Now that we know the best match, compute the arguments
if not exe_only:
arg_node = self.get_optional_child("arguments", root=the_match)
- if arg_node is not None:
+ if arg_node:
arg_nodes = self.get_children("arg", root=arg_node)
for arg_node in arg_nodes:
arg_value = transform_vars(self.text(arg_node),
case=case,
- subgroup=job,
+ subgroup=job,overrides=overrides,
default=self.get(arg_node, "default"))
args.append(arg_value)
diff --git a/cime/scripts/lib/CIME/XML/env_workflow.py b/cime/scripts/lib/CIME/XML/env_workflow.py
new file mode 100644
index 000000000000..5a70e4d5987f
--- /dev/null
+++ b/cime/scripts/lib/CIME/XML/env_workflow.py
@@ -0,0 +1,141 @@
+"""
+Interface to the env_workflow.xml file. This class inherits from EnvBase
+"""
+
+from CIME.XML.standard_module_setup import *
+from CIME.XML.env_base import EnvBase
+from CIME.utils import get_cime_root
+import re, math
+
+logger = logging.getLogger(__name__)
+
+# pragma pylint: disable=attribute-defined-outside-init
+
+class EnvWorkflow(EnvBase):
+
+ def __init__(self, case_root=None, infile="env_workflow.xml", read_only=False):
+ """
+ initialize an object interface to file env_workflow.xml in the case directory
+ """
+ # This arbitrary setting should always be overwritten
+ # schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_workflow.xsd")
+ # TODO: define schema for this file
+ schema = None
+ super(EnvWorkflow,self).__init__(case_root, infile, schema=schema, read_only=read_only)
+
+ def create_job_groups(self, batch_jobs, is_test):
+ # Subtle: in order to support dynamic batch jobs, we need to remove the
+ # job_submission group and replace with job-based groups
+
+ orig_group = self.get_child("group", {"id":"job_submission"},
+ err_msg="Looks like job groups have already been created")
+ orig_group_children = super(EnvWorkflow, self).get_children(root=orig_group)
+
+ childnodes = []
+ for child in reversed(orig_group_children):
+ childnodes.append(child)
+
+ self.remove_child(orig_group)
+
+ for name, jdict in batch_jobs:
+ if name == "case.run" and is_test:
+ pass # skip
+ elif name == "case.test" and not is_test:
+ pass # skip
+ elif name == "case.run.sh":
+ pass # skip
+ else:
+ new_job_group = self.make_child("group", {"id":name})
+ for field in jdict.keys():
+ if field == "runtime_parameters":
+ continue
+ val = jdict[field]
+ node = self.make_child("entry", {"id":field,"value":val}, root=new_job_group)
+ self.make_child("type", root=node, text="char")
+
+ for child in childnodes:
+ self.add_child(self.copy(child), root=new_job_group)
+
+ def get_jobs(self):
+ groups = self.get_children("group")
+ results = []
+ for group in groups:
+ results.append(self.get(group, "id"))
+ return results
+
+ def get_type_info(self, vid):
+ gnodes = self.get_children("group")
+ type_info = None
+ for gnode in gnodes:
+ nodes = self.get_children("entry",{"id":vid}, root=gnode)
+ type_info = None
+ for node in nodes:
+ new_type_info = self._get_type_info(node)
+ if type_info is None:
+ type_info = new_type_info
+ else:
+ expect( type_info == new_type_info,
+ "Inconsistent type_info for entry id={} {} {}".format(vid, new_type_info, type_info))
+ return type_info
+
+ def get_job_specs(self, job):
+ task_count = self.get_value("task_count", subgroup=job)
+ tasks_per_node = self.get_value("tasks_per_node", subgroup=job)
+ thread_count = self.get_value("thread_count", subgroup=job)
+ num_nodes = None
+ if task_count is not None and tasks_per_node is not None:
+ task_count = int(task_count)
+ num_nodes = int(math.ceil(float(task_count)/float(tasks_per_node)))
+ tasks_per_node = task_count//num_nodes
+ if not thread_count:
+ thread_count = 1
+
+ return task_count, num_nodes, tasks_per_node, thread_count
+
+ # pylint: disable=arguments-differ
+ def get_value(self, item, attribute=None, resolved=True, subgroup="PRIMARY"):
+ """
+ Must default subgroup to something in order to provide single return value
+ """
+ value = None
+ if subgroup == "PRIMARY":
+ subgroup = "case.test" if "case.test" in self.get_jobs() else "case.run"
+
+ #pylint: disable=assignment-from-none
+ if value is None:
+ value = super(EnvWorkflow, self).get_value(item, attribute=attribute, resolved=resolved, subgroup=subgroup)
+
+ return value
+
+ # pylint: disable=arguments-differ
+ def set_value(self, item, value, subgroup=None, ignore_type=False):
+ """
+ Override the entry_id set_value function with some special cases for this class
+ """
+ val = None
+
+ # allow the user to set item for all jobs if subgroup is not provided
+ if subgroup is None:
+ gnodes = self.get_children("group")
+ for gnode in gnodes:
+ node = self.get_optional_child("entry", {"id":item}, root=gnode)
+ if node is not None:
+ self._set_value(node, value, vid=item, ignore_type=ignore_type)
+ val = value
+ else:
+ group = self.get_optional_child("group", {"id":subgroup})
+ if group is not None:
+ node = self.get_optional_child("entry", {"id":item}, root=group)
+ if node is not None:
+ val = self._set_value(node, value, vid=item, ignore_type=ignore_type)
+
+ return val
+
+ def get_children(self, name=None, attributes=None, root=None):
+ if name in ("JOB_WALLCLOCK_TIME", "PROJECT", "CHARGE_ACCOUNT",
+ "JOB_QUEUE", "BATCH_COMMAND_FLAGS"):
+ nodes = super(EnvWorkflow, self).get_children("entry", attributes={"id":name}, root=root)
+ else:
+ nodes = super(EnvWorkflow, self).get_children(name, attributes=attributes, root=root)
+
+ return nodes
diff --git a/cime/scripts/lib/CIME/XML/generic_xml.py b/cime/scripts/lib/CIME/XML/generic_xml.py
index 5860b8174a3b..d2d40d1a0212 100644
--- a/cime/scripts/lib/CIME/XML/generic_xml.py
+++ b/cime/scripts/lib/CIME/XML/generic_xml.py
@@ -370,7 +370,7 @@ def scan_optional_child(self, nodename, attributes=None, root=None):
"""
nodes = self.scan_children(nodename, attributes=attributes, root=root)
- expect(len(nodes) <= 1, "Multiple matches for nodename '{}' and attrs '{}' in file '{}'".format(nodename, attributes, self.filename))
+ expect(len(nodes) <= 1, "Multiple matches for nodename '{}' and attrs '{}' in file '{}', found {} matches".format(nodename, attributes, self.filename, len(nodes)))
return nodes[0] if nodes else None
def scan_children(self, nodename, attributes=None, root=None):
diff --git a/cime/scripts/lib/CIME/XML/workflow.py b/cime/scripts/lib/CIME/XML/workflow.py
new file mode 100644
index 000000000000..91767d277eb1
--- /dev/null
+++ b/cime/scripts/lib/CIME/XML/workflow.py
@@ -0,0 +1,80 @@
+"""
+Interface to the config_workflow.xml file. This class inherits from GenericXML.py
+"""
+
+from CIME.XML.standard_module_setup import *
+from CIME.XML.generic_xml import GenericXML
+from CIME.XML.files import Files
+from CIME.utils import expect
+
+logger = logging.getLogger(__name__)
+
+class Workflow(GenericXML):
+
+ def __init__(self, infile=None, files=None):
+ """
+ initialize an object
+ """
+ if files is None:
+ files = Files()
+ if infile is None:
+ infile = files.get_value("WORKFLOW_SPEC_FILE")
+ expect(infile, "No workflow file defined in {}".format(files.filename))
+
+ schema = files.get_schema("WORKFLOW_SPEC_FILE")
+
+ GenericXML.__init__(self, infile, schema=schema)
+
+ #Append the contents of $HOME/.cime/config_workflow.xml if it exists
+    # This could cause problems if node matches are repeated when only one is expected
+ infile = os.path.join(os.environ.get("HOME"),".cime","config_workflow.xml")
+ if os.path.exists(infile):
+ GenericXML.read(self, infile)
+
+ def get_workflow_jobs(self, machine, workflow_case="default"):
+ """
+ Return a list of jobs with the first element the name of the case script
+ and the second a dict of qualifiers for the job
+ """
+ jobs = []
+ bnodes = []
+ findmore = True
+ prepend = False
+ while findmore:
+ bnode = self.get_optional_child("workflow_jobs", attributes={"case":workflow_case})
+ expect(bnode,"No workflow_case {} found in file {}".format(workflow_case, self.filename))
+ if prepend:
+ bnodes = [bnode] + bnodes
+ else:
+ bnodes.append(bnode)
+ prepend = False
+ workflow_attribs = self.attrib(bnode)
+ if "prepend" in workflow_attribs:
+ workflow_case = workflow_attribs["prepend"]
+ prepend = True
+ elif "append" in workflow_attribs:
+ workflow_case = workflow_attribs["append"]
+ else:
+ findmore = False
+ for bnode in bnodes:
+ for jnode in self.get_children(root=bnode):
+ if self.name(jnode) == "job":
+ name = self.get(jnode, "name")
+ jdict = {}
+ for child in self.get_children(root=jnode):
+ if self.name(child) == "runtime_parameters":
+ attrib = self.attrib(child)
+ if attrib and attrib == {'MACH' : machine}:
+ for rtchild in self.get_children(root=child):
+ jdict[self.name(rtchild)] = self.text(rtchild)
+ elif not attrib:
+ for rtchild in self.get_children(root=child):
+ if self.name(rtchild) not in jdict:
+ jdict[self.name(rtchild)] = self.text(rtchild)
+
+ else:
+ jdict[self.name(child)] = self.text(child)
+
+ jobs.append((name, jdict))
+
+ return jobs
diff --git a/cime/scripts/lib/CIME/aprun.py b/cime/scripts/lib/CIME/aprun.py
index 59ec4792d7d7..30c053c07d00 100755
--- a/cime/scripts/lib/CIME/aprun.py
+++ b/cime/scripts/lib/CIME/aprun.py
@@ -108,7 +108,7 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
return aprun_args, total_node_count, total_task_count, min_tasks_per_node, max_thread_count
###############################################################################
-def get_aprun_cmd_for_case(case, run_exe):
+def get_aprun_cmd_for_case(case, run_exe, overrides=None):
###############################################################################
"""
Given a case, construct and return the aprun command and optimized node count
@@ -120,9 +120,19 @@ def get_aprun_cmd_for_case(case, run_exe):
for the_list, item_name in zip([ntasks, nthreads, rootpes, pstrids],
["NTASKS", "NTHRDS", "ROOTPE", "PSTRID"]):
the_list.append(case.get_value("_".join([item_name, model])))
+ max_tasks_per_node = case.get_value("MAX_TASKS_PER_NODE")
+ if overrides:
+ if 'max_tasks_per_node' in overrides:
+ max_tasks_per_node = overrides['max_tasks_per_node']
+ if 'total_tasks' in overrides:
+ ntasks = [overrides['total_tasks'] if x > 1 else x for x in ntasks]
+ if 'thread_count' in overrides:
+ nthreads = [overrides['thread_count'] if x > 1 else x for x in nthreads]
+
+
return _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
- case.get_value("MAX_TASKS_PER_NODE"),
+ max_tasks_per_node,
case.get_value("MAX_MPITASKS_PER_NODE"),
case.get_value("PIO_NUMTASKS"),
case.get_value("PIO_ASYNC_INTERFACE"),
diff --git a/cime/scripts/lib/CIME/case/case.py b/cime/scripts/lib/CIME/case/case.py
index 1d13d88a6d6e..c1e0a375710a 100644
--- a/cime/scripts/lib/CIME/case/case.py
+++ b/cime/scripts/lib/CIME/case/case.py
@@ -22,6 +22,7 @@
from CIME.XML.compsets import Compsets
from CIME.XML.grids import Grids
from CIME.XML.batch import Batch
+from CIME.XML.workflow import Workflow
from CIME.XML.pio import PIO
from CIME.XML.archive import Archive
from CIME.XML.env_test import EnvTest
@@ -32,6 +33,7 @@
from CIME.XML.env_run import EnvRun
from CIME.XML.env_archive import EnvArchive
from CIME.XML.env_batch import EnvBatch
+from CIME.XML.env_workflow import EnvWorkflow
from CIME.XML.generic_xml import GenericXML
from CIME.user_mod_support import apply_user_mods
from CIME.aprun import get_aprun_cmd_for_case
@@ -204,6 +206,8 @@ def read_xml(self):
self._env_entryid_files.append(EnvBuild(self._caseroot, components=components, read_only=self._force_read_only))
self._env_entryid_files.append(EnvMachPes(self._caseroot, components=components, read_only=self._force_read_only))
self._env_entryid_files.append(EnvBatch(self._caseroot, read_only=self._force_read_only))
+ self._env_entryid_files.append(EnvWorkflow(self._caseroot, read_only=self._force_read_only))
+
if os.path.isfile(os.path.join(self._caseroot,"env_test.xml")):
self._env_entryid_files.append(EnvTest(self._caseroot, components=components, read_only=self._force_read_only))
self._env_generic_files = []
@@ -877,7 +881,8 @@ def configure(self, compset_name, grid_name, machine_name=None,
multi_driver=False, ninst=1, test=False,
walltime=None, queue=None, output_root=None,
run_unsupported=False, answer=None,
- input_dir=None, driver=None, non_local=False):
+ input_dir=None, driver=None, workflow_case="default",
+ non_local=False):
expect(check_name(compset_name, additional_chars='.'), "Invalid compset name {}".format(compset_name))
@@ -1062,11 +1067,13 @@ def configure(self, compset_name, grid_name, machine_name=None,
batch_system_type = machobj.get_value("BATCH_SYSTEM")
logger.info("Batch_system_type is {}".format(batch_system_type))
- batch = Batch(batch_system=batch_system_type, machine=machine_name)
- bjobs = batch.get_batch_jobs()
+ batch = Batch(batch_system=batch_system_type, machine=machine_name, files=files)
+ workflow = Workflow(files=files)
+ bjobs = workflow.get_workflow_jobs(machine=machine_name, workflow_case=workflow_case)
+ env_workflow = self.get_env("workflow")
env_batch.set_batch_system(batch, batch_system_type=batch_system_type)
- env_batch.create_job_groups(bjobs, test)
+ env_workflow.create_job_groups(bjobs, test)
if walltime:
self.set_value("USER_REQUESTED_WALLTIME", walltime, subgroup=self.get_primary_job())
@@ -1141,7 +1148,6 @@ def _create_caseroot_tools(self):
except Exception as e:
logger.warning("FAILED to set up exefiles: {}".format(str(e)))
- # set up utility files in caseroot/Tools/
toolfiles = [os.path.join(toolsdir, "check_lockedfiles"),
os.path.join(toolsdir, "get_standard_makefile_args"),
os.path.join(toolsdir, "getTiming"),
@@ -1364,7 +1370,7 @@ def cancel_batch_jobs(self, jobids):
if not success:
logger.warning("Failed to kill {}".format(jobid))
- def get_mpirun_cmd(self, job=None, allow_unresolved_envvars=True):
+ def get_mpirun_cmd(self, job=None, allow_unresolved_envvars=True, overrides=None):
if job is None:
job = self.get_primary_job()
@@ -1394,11 +1400,11 @@ def get_mpirun_cmd(self, job=None, allow_unresolved_envvars=True):
logger.info('Using a custom run_misc_suffix {}'.format(custom_run_misc_suffix))
run_misc_suffix = custom_run_misc_suffix
-
# special case for aprun
if executable is not None and "aprun" in executable and not "theta" in self.get_value("MACH"):
- aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe)[0:2]
- expect( (num_nodes + self.spare_nodes) == self.num_nodes, "Not using optimized num nodes")
+ aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe, overrides=overrides)[0:2]
+ if job in ("case.run","case.test"):
+ expect( (num_nodes + self.spare_nodes) == self.num_nodes, "Not using optimized num nodes")
return self.get_resolved_value(executable + aprun_args + " " + run_misc_suffix, allow_unresolved_envvars=allow_unresolved_envvars)
else:
@@ -1459,10 +1465,9 @@ def _check_testlists(self, compset_alias, grid_name, files):
# called if run_unsupported is False.
tests = Testlist(tests_spec_file, files)
testlist = tests.get_tests(compset=compset_alias, grid=grid_name, supported_only=True)
+ test_categories = ["prealpha", "prebeta", "test_release", "aux_"]
for test in testlist:
- if test["category"] == "prealpha" \
- or test["category"] == "prebeta" \
- or "aux_" in test["category"] \
+ if test["category"] in test_categories \
or get_cime_default_driver() in test["category"]:
testcnt += 1
if testcnt > 0:
@@ -1501,6 +1506,8 @@ def set_file(self, xmlfile):
new_env_file = EnvMachPes(infile=xmlfile, components=components)
elif ftype == "env_batch.xml":
new_env_file = EnvBatch(infile=xmlfile)
+ elif ftype == "env_workflow.xml":
+ new_env_file = EnvWorkflow(infile=xmlfile)
elif ftype == "env_test.xml":
new_env_file = EnvTest(infile=xmlfile)
elif ftype == "env_archive.xml":
@@ -1563,7 +1570,7 @@ def create(self, casename, srcroot, compset_name, grid_name,
multi_driver=False, ninst=1, test=False,
walltime=None, queue=None, output_root=None,
run_unsupported=False, answer=None,
- input_dir=None, driver=None, non_local=False):
+ input_dir=None, driver=None, workflow_case="default", non_local=False):
try:
# Set values for env_case.xml
self.set_lookup_value("CASE", os.path.basename(casename))
@@ -1579,7 +1586,8 @@ def create(self, casename, srcroot, compset_name, grid_name,
walltime=walltime, queue=queue,
output_root=output_root,
run_unsupported=run_unsupported, answer=answer,
- input_dir=input_dir, driver=driver, non_local=non_local)
+ input_dir=input_dir, driver=driver,
+ workflow_case=workflow_case, non_local=non_local)
self.create_caseroot()
diff --git a/cime/scripts/lib/CIME/case/case_clone.py b/cime/scripts/lib/CIME/case/case_clone.py
index 2567631eb2ea..2a873340233f 100644
--- a/cime/scripts/lib/CIME/case/case_clone.py
+++ b/cime/scripts/lib/CIME/case/case_clone.py
@@ -4,9 +4,9 @@
import os, glob, shutil
from CIME.XML.standard_module_setup import *
from CIME.utils import expect, check_name, safe_copy
-from CIME.user_mod_support import apply_user_mods
-from CIME.locked_files import lock_file
from CIME.simple_compare import compare_files
+from CIME.locked_files import lock_file
+from CIME.user_mod_support import apply_user_mods
logger = logging.getLogger(__name__)
@@ -123,6 +123,14 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None,
os.path.join(newcaseroot, casesub),
symlinks=True)
+ # copy the postprocessing directory if it exists
+ if os.path.isdir(os.path.join(cloneroot, "postprocess")):
+ shutil.copytree(os.path.join(cloneroot, "postprocess"),
+ os.path.join(newcaseroot, "postprocess"),
+ symlinks=True)
+
+
+
# lock env_case.xml in new case
lock_file("env_case.xml", newcaseroot)
diff --git a/cime/scripts/lib/CIME/case/case_run.py b/cime/scripts/lib/CIME/case/case_run.py
index 1e5bb8626348..289d616ed5e8 100644
--- a/cime/scripts/lib/CIME/case/case_run.py
+++ b/cime/scripts/lib/CIME/case/case_run.py
@@ -29,7 +29,10 @@ def _pre_run_check(case, lid, skip_pnl=False, da_cycle=0):
safe_copy(env_mach_pes,"{}.{}".format(env_mach_pes, lid))
# check for locked files, may impact BUILD_COMPLETE
- case.check_lockedfiles()
+ skip = None
+ if case.get_value("EXTERNAL_WORKFLOW"):
+ skip = "env_batch"
+ case.check_lockedfiles(skip=skip)
logger.debug("check_lockedfiles OK")
build_complete = case.get_value("BUILD_COMPLETE")
@@ -241,6 +244,8 @@ def _resubmit_check(case):
case.submit(job=job, resubmit=True)
+ logger.debug("resubmit after check is {}".format(resubmit))
+
###############################################################################
def _do_external(script_name, caseroot, rundir, lid, prefix):
###############################################################################
@@ -328,10 +333,14 @@ def case_run(self, skip_pnl=False, set_continue_run=False, submit_resubmits=Fals
self.set_value("CONTINUE_RUN",
self.get_value("RESUBMIT_SETS_CONTINUE_RUN"))
- logger.warning("check for resubmit")
- if submit_resubmits:
- _resubmit_check(self)
+ external_workflow = self.get_value("EXTERNAL_WORKFLOW")
+ if not external_workflow:
+ logger.warning("check for resubmit")
- model_log("e3sm", logger, "{} CASE.RUN HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
+ logger.debug("submit_resubmits is {}".format(submit_resubmits))
+ if submit_resubmits:
+ _resubmit_check(self)
+
+ model_log("e3sm", logger, "{} CASE.RUN HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
return True
diff --git a/cime/scripts/lib/CIME/case/case_setup.py b/cime/scripts/lib/CIME/case/case_setup.py
index ae17ceb41172..4966edf6f6fe 100644
--- a/cime/scripts/lib/CIME/case/case_setup.py
+++ b/cime/scripts/lib/CIME/case/case_setup.py
@@ -116,9 +116,7 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False):
# Set tasks to 1 if mpi-serial library
if mpilib == "mpi-serial":
- for vid, value in case:
- if vid.startswith("NTASKS") and value != 1:
- case.set_value(vid, 1)
+ case.set_value("NTASKS", 1)
# Check ninst.
# In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component.
diff --git a/cime/scripts/lib/CIME/case/case_st_archive.py b/cime/scripts/lib/CIME/case/case_st_archive.py
index e2cdecfc6520..9acb8e409a68 100644
--- a/cime/scripts/lib/CIME/case/case_st_archive.py
+++ b/cime/scripts/lib/CIME/case/case_st_archive.py
@@ -223,6 +223,36 @@ def _archive_history_files(archive, archive_entry,
if compname == 'clm':
compname = r'clm2?'
+ if compname == 'nemo':
+ archive_rblddir = os.path.join(dout_s_root, compclass, 'rebuild')
+ if not os.path.exists(archive_rblddir):
+ os.makedirs(archive_rblddir)
+ logger.debug("created directory {}".format(archive_rblddir))
+
+ sfxrbld = r'mesh_mask_' + r'[0-9]*'
+ pfile = re.compile(sfxrbld)
+ rbldfiles = [f for f in os.listdir(rundir) if pfile.search(f)]
+ logger.debug("rbldfiles = {} ".format(rbldfiles))
+
+ if rbldfiles:
+ for rbldfile in rbldfiles:
+ srcfile = join(rundir, rbldfile)
+ destfile = join(archive_rblddir, rbldfile)
+ logger.info("moving {} to {} ".format(srcfile, destfile))
+ archive_file_fn(srcfile, destfile)
+
+ sfxhst = casename + r'_[0-9][mdy]_' + r'[0-9]*'
+ pfile = re.compile(sfxhst)
+ hstfiles = [f for f in os.listdir(rundir) if pfile.search(f)]
+ logger.debug("hstfiles = {} ".format(hstfiles))
+
+ if hstfiles:
+ for hstfile in hstfiles:
+ srcfile = join(rundir, hstfile)
+ destfile = join(archive_histdir, hstfile)
+ logger.info("moving {} to {} ".format(srcfile, destfile))
+ archive_file_fn(srcfile, destfile)
+
# determine ninst and ninst_string
# archive history files - the only history files that kept in the
@@ -314,6 +344,9 @@ def _archive_restarts_date(case, casename, rundir, archive,
"""
logger.info('-------------------------------------------')
logger.info('Archiving restarts for date {}'.format(datename))
+ logger.debug('last date {}'.format(last_date))
+ logger.debug('date is last? {}'.format(datename_is_last))
+ logger.debug('components are {}'.format(components))
logger.info('-------------------------------------------')
logger.debug("last date: {}".format(last_date))
@@ -391,6 +424,10 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry,
pattern = compname + r'\.' + suffix + r'\.' + '_'.join(datename_str.rsplit('-', 1))
pfile = re.compile(pattern)
restfiles = [f for f in os.listdir(rundir) if pfile.search(f)]
+ elif compname == 'nemo':
+ pattern = r'_*_' + suffix + r'[0-9]*'
+ pfile = re.compile(pattern)
+ restfiles = [f for f in os.listdir(rundir) if pfile.search(f)]
else:
pattern = r"^{}\.{}[\d_]*\.".format(casename, compname)
pfile = re.compile(pattern)
@@ -399,10 +436,10 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry,
pfile = re.compile(pattern)
restfiles = [f for f in files if pfile.search(f)]
logger.debug("pattern is {} restfiles {}".format(pattern, restfiles))
- for restfile in restfiles:
- restfile = os.path.basename(restfile)
+ for rfile in restfiles:
+ rfile = os.path.basename(rfile)
- file_date = get_file_date(restfile)
+ file_date = get_file_date(rfile)
if last_date is not None and file_date > last_date:
# Skip this file
continue
@@ -413,7 +450,7 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry,
# obtain array of history files for restarts
# need to do this before archiving restart files
histfiles_for_restart = get_histfiles_for_restarts(rundir, archive,
- archive_entry, restfile,
+ archive_entry, rfile,
testonly=testonly)
if datename_is_last and histfiles_for_restart:
@@ -424,8 +461,8 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry,
# archive restart files and all history files that are needed for restart
# Note that the latest file should be copied and not moved
if datename_is_last:
- srcfile = os.path.join(rundir, restfile)
- destfile = os.path.join(archive_restdir, restfile)
+ srcfile = os.path.join(rundir, rfile)
+ destfile = os.path.join(archive_restdir, rfile)
last_restart_file_fn(srcfile, destfile)
logger.info("{} file {} to {}".format(last_restart_file_fn_msg, srcfile, destfile))
for histfile in histfiles_for_restart:
@@ -439,8 +476,8 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry,
else:
# Only archive intermediate restarts if requested - otherwise remove them
if case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'):
- srcfile = os.path.join(rundir, restfile)
- destfile = os.path.join(archive_restdir, restfile)
+ srcfile = os.path.join(rundir, rfile)
+ destfile = os.path.join(archive_restdir, rfile)
expect(os.path.isfile(srcfile),
"restart file {} does not exist ".format(srcfile))
archive_file_fn(srcfile, destfile)
@@ -456,15 +493,93 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry,
logger.info("copying {} to {}".format(srcfile, destfile))
safe_copy(srcfile, destfile)
else:
- srcfile = os.path.join(rundir, restfile)
- logger.info("removing interim restart file {}".format(srcfile))
- if (os.path.isfile(srcfile)):
- try:
- os.remove(srcfile)
- except OSError:
- logger.warning("unable to remove interim restart file {}".format(srcfile))
+ if compname == 'nemo':
+ flist = glob.glob(rundir + "/" + casename + "_*_restart*.nc")
+ logger.debug("nemo restart file {}".format(flist))
+ if len(flist) > 2:
+ flist0 = glob.glob(rundir + "/" + casename + "_*_restart_0000.nc")
+ if len(flist0) > 1:
+ rstfl01 = flist0[0]
+ rstfl01spl = rstfl01.split("/")
+ logger.debug("splitted name {}".format(rstfl01spl))
+ rstfl01nm = rstfl01spl[-1]
+ rstfl01nmspl = rstfl01nm.split("_")
+ logger.debug("splitted name step2 {}".format(rstfl01nmspl))
+ rsttm01 = rstfl01nmspl[-3]
+
+ rstfl02 = flist0[1]
+ rstfl02spl = rstfl02.split("/")
+ logger.debug("splitted name {}".format(rstfl02spl))
+ rstfl02nm = rstfl02spl[-1]
+ rstfl02nmspl = rstfl02nm.split("_")
+ logger.debug("splitted name step2 {}".format(rstfl02nmspl))
+ rsttm02 = rstfl02nmspl[-3]
+
+ if int(rsttm01) > int(rsttm02):
+ restlist = glob.glob(rundir + "/" + casename + "_" + rsttm02 + "_restart_*.nc")
+ else:
+ restlist = glob.glob(rundir + "/" + casename + "_" + rsttm01 + "_restart_*.nc")
+ logger.debug("nemo restart list {}".format(restlist))
+ if restlist:
+ for _restfile in restlist:
+ srcfile = os.path.join(rundir, _restfile)
+ logger.info("removing interim restart file {}".format(srcfile))
+ if (os.path.isfile(srcfile)):
+ try:
+ os.remove(srcfile)
+ except OSError:
+ logger.warning("unable to remove interim restart file {}".format(srcfile))
+ else:
+ logger.warning("interim restart file {} does not exist".format(srcfile))
+ elif len(flist) == 2:
+ flist0 = glob.glob(rundir + "/" + casename + "_*_restart.nc")
+ if len(flist0) > 1:
+ rstfl01 = flist0[0]
+ rstfl01spl = rstfl01.split("/")
+ logger.debug("splitted name {}".format(rstfl01spl))
+ rstfl01nm = rstfl01spl[-1]
+ rstfl01nmspl = rstfl01nm.split("_")
+ logger.debug("splitted name step2 {}".format(rstfl01nmspl))
+ rsttm01 = rstfl01nmspl[-2]
+
+ rstfl02 = flist0[1]
+ rstfl02spl = rstfl02.split("/")
+ logger.debug("splitted name {}".format(rstfl02spl))
+ rstfl02nm = rstfl02spl[-1]
+ rstfl02nmspl = rstfl02nm.split("_")
+ logger.debug("splitted name step2 {}".format(rstfl02nmspl))
+ rsttm02 = rstfl02nmspl[-2]
+
+ if int(rsttm01) > int(rsttm02):
+ restlist = glob.glob(rundir + "/" + casename + "_" + rsttm02 + "_restart_*.nc")
+ else:
+ restlist = glob.glob(rundir + "/" + casename + "_" + rsttm01 + "_restart_*.nc")
+ logger.debug("nemo restart list {}".format(restlist))
+ if restlist:
+ for _rfile in restlist:
+ srcfile = os.path.join(rundir, _rfile)
+ logger.info("removing interim restart file {}".format(srcfile))
+ if (os.path.isfile(srcfile)):
+ try:
+ os.remove(srcfile)
+ except OSError:
+ logger.warning("unable to remove interim restart file {}".format(srcfile))
+ else:
+ logger.warning("interim restart file {} does not exist".format(srcfile))
+ else:
+ logger.warning("unable to find NEMO restart file in {}".format(rundir))
+
+
else:
- logger.warning("interim restart file {} does not exist".format(srcfile))
+ srcfile = os.path.join(rundir, rfile)
+ logger.info("removing interim restart file {}".format(srcfile))
+ if (os.path.isfile(srcfile)):
+ try:
+ os.remove(srcfile)
+ except OSError:
+ logger.warning("unable to remove interim restart file {}".format(srcfile))
+ else:
+ logger.warning("interim restart file {} does not exist".format(srcfile))
return histfiles_savein_rundir
@@ -597,6 +712,7 @@ def case_st_archive(self, last_date_str=None, archive_incomplete_logs=True, copy
"""
Create archive object and perform short term archiving
"""
+ logger.debug("resubmit {}".format(resubmit))
caseroot = self.get_value("CASEROOT")
self.load_env(job="case.st_archive")
if last_date_str is not None:
@@ -631,18 +747,19 @@ def case_st_archive(self, last_date_str=None, archive_incomplete_logs=True, copy
logger.info("st_archive completed")
# resubmit case if appropriate
- resubmit_cnt = self.get_value("RESUBMIT")
- logger.debug("resubmit_cnt {} resubmit {}".format(resubmit_cnt, resubmit))
- if resubmit_cnt > 0 and resubmit:
- logger.info("resubmitting from st_archive, resubmit={:d}".format(resubmit_cnt))
- if self.get_value("MACH") == "mira":
- expect(os.path.isfile(".original_host"), "ERROR alcf host file not found")
- with open(".original_host", "r") as fd:
- sshhost = fd.read()
- run_cmd("ssh cooleylogin1 ssh {} '{case}/case.submit {case} --resubmit' "\
+ if not self.get_value("EXTERNAL_WORKFLOW"):
+ resubmit_cnt = self.get_value("RESUBMIT")
+ logger.debug("resubmit_cnt {} resubmit {}".format(resubmit_cnt, resubmit))
+ if resubmit_cnt > 0 and resubmit:
+ logger.info("resubmitting from st_archive, resubmit={:d}".format(resubmit_cnt))
+ if self.get_value("MACH") == "mira":
+ expect(os.path.isfile(".original_host"), "ERROR alcf host file not found")
+ with open(".original_host", "r") as fd:
+ sshhost = fd.read()
+ run_cmd("ssh cooleylogin1 ssh {} '{case}/case.submit {case} --resubmit' "\
.format(sshhost, case=caseroot), verbose=True)
- else:
- self.submit(resubmit=True)
+ else:
+ self.submit(resubmit=True)
return True
diff --git a/cime/scripts/lib/CIME/case/case_submit.py b/cime/scripts/lib/CIME/case/case_submit.py
index aea7141b7285..7559f8869e7f 100644
--- a/cime/scripts/lib/CIME/case/case_submit.py
+++ b/cime/scripts/lib/CIME/case/case_submit.py
@@ -55,8 +55,8 @@ def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resub
# if case.submit is called with the no_batch flag then we assume that this
# flag will stay in effect for the duration of the RESUBMITs
env_batch = case.get_env("batch")
-
- if resubmit and env_batch.get_batch_system_type() == "none":
+ external_workflow = case.get_value("EXTERNAL_WORKFLOW")
+ if resubmit and env_batch.get_batch_system_type() == "none" or external_workflow:
no_batch = True
if no_batch:
batch_system = "none"
@@ -66,12 +66,13 @@ def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resub
case.set_value("BATCH_SYSTEM", batch_system)
env_batch_has_changed = False
- try:
- case.check_lockedfile(os.path.basename(env_batch.filename))
- except:
- env_batch_has_changed = True
+ if not external_workflow:
+ try:
+ case.check_lockedfile(os.path.basename(env_batch.filename))
+ except:
+ env_batch_has_changed = True
- if batch_system != "none" and env_batch_has_changed:
+ if batch_system != "none" and env_batch_has_changed and not external_workflow:
# May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)
logger.warning(\
"""
@@ -215,7 +216,6 @@ def check_case(self):
# the ww3 buildnml has dependancies on inputdata so we must run it again
self.create_namelists(component='WAV')
-
expect(self.get_value("BUILD_COMPLETE"), "Build complete is "
"not True please rebuild the model by calling case.build")
logger.info("Check case OK")
diff --git a/cime/scripts/lib/CIME/get_timing.py b/cime/scripts/lib/CIME/get_timing.py
index 5254dfd7e427..f2ed3ff77fe9 100644
--- a/cime/scripts/lib/CIME/get_timing.py
+++ b/cime/scripts/lib/CIME/get_timing.py
@@ -450,15 +450,16 @@ def _getTiming(self, inst=0):
self.write("\n")
self.write(" Overall Metrics: \n")
- self.write(" Model Cost: {:10.2f} pe-hrs/simulated_year ".format((tmax*365.0*pecost)/(3600.0*adays)))
- if inst_label:
- self.write(" (Model Cost is for entire ensemble)")
- self.write("\n")
- self.write(" Model Throughput: {:10.2f} simulated_years/day \n".format((86400.0*adays)/(tmax*365.0)) )
+ if adays > 0:
+ self.write(" Model Cost: {:10.2f} pe-hrs/simulated_year \n".format((tmax*365.0*pecost)/(3600.0*adays)))
+ if tmax > 0:
+ self.write(" Model Throughput: {:10.2f} simulated_years/day \n".format((86400.0*adays)/(tmax*365.0)) )
+
self.write("\n")
self.write(" Init Time : {:10.3f} seconds \n".format(nmax))
- self.write(" Run Time : {:10.3f} seconds {:10.3f} seconds/day \n".format(tmax, tmax/adays))
+ if adays > 0:
+ self.write(" Run Time : {:10.3f} seconds {:10.3f} seconds/day \n".format(tmax, tmax/adays))
self.write(" Final Time : {:10.3f} seconds \n".format(fmax))
self.write("\n")
@@ -477,14 +478,13 @@ def _getTiming(self, inst=0):
"with other components \n")
self.write("\n")
- self.write(" TOT Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(tmax, tmax/adays, tmaxr))
- for k in self.case.get_values("COMP_CLASSES"):
- m = self.models[k]
- self.write(" {} Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(k, m.tmax, m.tmax/adays, m.tmaxr))
- self.write(" CPL COMM Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(xmax, xmax/adays, xmaxr))
- if self._driver == "mct":
- self.write("\n\n---------------- DRIVER TIMING FLOWCHART "
- "--------------------- \n\n")
+
+ if adays > 0:
+ self.write(" TOT Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(tmax, tmax/adays, tmaxr))
+ for k in self.case.get_values("COMP_CLASSES"):
+ m = self.models[k]
+ self.write(" {} Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(k, m.tmax, m.tmax/adays, m.tmaxr))
+ self.write(" CPL COMM Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(xmax, xmax/adays, xmaxr))
pstrlen = 25
hoffset = 1
diff --git a/cime/scripts/tests/scripts_regression_tests.py b/cime/scripts/tests/scripts_regression_tests.py
index 4422aa4b10fa..5cae8347c3b4 100755
--- a/cime/scripts/tests/scripts_regression_tests.py
+++ b/cime/scripts/tests/scripts_regression_tests.py
@@ -1097,6 +1097,11 @@ def tearDown(self):
###########################################################################
def _create_test(self, extra_args, test_id=None, pre_run_errors=False, run_errors=False, env_changes=""):
###########################################################################
+ # All stub models are not supported in the nuopc driver
+ driver = CIME.utils.get_cime_default_driver()
+ if driver == 'nuopc':
+ extra_args.append(" ^SMS.T42_T42.S")
+
test_id = CIME.utils.get_timestamp() if test_id is None else test_id
extra_args.append("-t {}".format(test_id))
extra_args.append("--baseline-root {}".format(self._baseline_area))
@@ -1837,6 +1842,8 @@ def _batch_test_fixture(self, testcase_name):
run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args),
from_dir=SCRIPT_DIR)
+ run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
+
return testdir
###########################################################################
diff --git a/cime/src/build_scripts/buildlib.csm_share b/cime/src/build_scripts/buildlib.csm_share
index e45dbad9a97e..ab0fea86b4fd 100755
--- a/cime/src/build_scripts/buildlib.csm_share
+++ b/cime/src/build_scripts/buildlib.csm_share
@@ -71,6 +71,8 @@ def buildlib(bldroot, installpath, case):
else:
use_esmf = "noesmf"
filepath.append(os.path.join(cimeroot, "src", "share", "esmf_wrf_timemgr"))
+
+ comp_interface = case.get_value("COMP_INTERFACE")
ninst_value = case.get_value("NINST_VALUE")
libdir = os.path.join(bldroot,comp_interface,use_esmf, ninst_value,"csm_share")
if not os.path.isdir(libdir):
@@ -99,6 +101,9 @@ def buildlib(bldroot, installpath, case):
ninst_comp = case.get_value("NINST_{}".format(comp))
multiinst_cppdefs += " -DNUM_COMP_INST_{}={}".format(comp, ninst_comp)
+ if case.get_value("COMP_OCN") == "nemo":
+ multiinst_cppdefs += " -DNEMO_IN_CCSM "
+
installdir = os.path.join(installpath, comp_interface,
use_esmf, ninst_value)
for ndir in ("lib", "include"):
diff --git a/cime/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml b/cime/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml
index aef5b26ae645..2a1389bd2db8 100644
--- a/cime/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml
+++ b/cime/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml
@@ -352,7 +352,6 @@
aerosoldep_SSP4-6.0_monthly_1849-2101_${CLM_USRDAT_NAME}.nc
aerosoldep_SSP5-3.4_monthly_1849-2101_${CLM_USRDAT_NAME}.nc
aerosoldep_SSP5-8.5_monthly_1849-2101_${CLM_USRDAT_NAME}.nc
-
aerosoldep_monthly_1850_mean_1.9x2.5_c090421.nc
aerosoldep_monthly_2000_mean_1.9x2.5_c090421.nc
aerosoldep_monthly_1849-2006_1.9x2.5_c090803.nc
@@ -1732,6 +1731,7 @@
aerosoldep_SSP5-3.4_monthly_1849-2101_${CLM_USRDAT_NAME}.nc
aerosoldep_SSP5-8.5_monthly_1849-2101_${CLM_USRDAT_NAME}.nc
+ aerodep_clm_SSP126_b.e21.BSSP126cmip6.f09_g17.CMIP6-SSP1-2.6.001_2014-2101_monthly_0.9x1.25_c190523.nc
aerodep_clm_SSP245_b.e21.BWSSP245cmip6.f09_g17.CMIP6-SSP2-4.5-WACCM.001_2014-2101_monthly_0.9x1.25_c190401.nc
aerodep_clm_SSP370_b.e21.BWSSP370cmip6.f09_g17.CMIP6-SSP3-7.0-WACCM.001_2014-2101_monthly_0.9x1.25_c190402.nc
null
diff --git a/cime/src/components/data_comps/dice/nuopc/dice_comp_mod.F90 b/cime/src/components/data_comps/dice/nuopc/dice_comp_mod.F90
index 8f1a7d6443ee..308671b8bd86 100644
--- a/cime/src/components/data_comps/dice/nuopc/dice_comp_mod.F90
+++ b/cime/src/components/data_comps/dice/nuopc/dice_comp_mod.F90
@@ -465,7 +465,7 @@ subroutine dice_comp_init(flds_i2o_per_cat, mpicom, compid, my_task, master_task
! error check that mesh lats and lons correspond to those on the input domain file
index_lon = mct_aVect_indexRA(SDICE%grid%data,'lon')
do n = 1, lsize
- if (abs( SDICE%grid%data%rattr(index_lon,n) - xc(n)) > 1.e-4) then
+ if (abs(mod(SDICE%grid%data%rattr(index_lon,n) - xc(n),360.0_R8)) > 1.e-4) then
write(6,*)'ERROR: lon diff = ',abs(SDICE%grid%data%rattr(index_lon,n) - xc(n)),' too large'
call shr_sys_abort()
end if
diff --git a/cime/src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90 b/cime/src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90
index a996d01f3ace..388280d70815 100644
--- a/cime/src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90
+++ b/cime/src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90
@@ -134,15 +134,16 @@ subroutine dlnd_comp_advertise(importState, exportState, flds_scalar_name, &
end do
! The following puts all of the elevation class fields as an
- ! undidstributed dimension in the export state field
+ ! undistributed dimension in the export state field - index1 is bare land - and the total number of
+ ! elevation classes not equal to bare land go from index2 -> glc_nec+1
call dshr_fld_add(med_fld="Sl_lfrin", fldlist_num=fldsFrLnd_num, fldlist=fldsFrLnd)
call dshr_fld_add(med_fld='Sl_tsrf_elev', fldlist_num=fldsFrLnd_num, fldlist=fldsFrLnd, &
- ungridded_lbound=1, ungridded_ubound=glc_nec)
+ ungridded_lbound=1, ungridded_ubound=glc_nec+1)
call dshr_fld_add(med_fld='Sl_topo_elev', fldlist_num=fldsFrLnd_num, fldlist=fldsFrLnd, &
- ungridded_lbound=1, ungridded_ubound=glc_nec)
+ ungridded_lbound=1, ungridded_ubound=glc_nec+1)
call dshr_fld_add(med_fld='Flgl_qice_elev', fldlist_num=fldsFrLnd_num, fldlist=fldsFrLnd, &
- ungridded_lbound=1, ungridded_ubound=glc_nec)
+ ungridded_lbound=1, ungridded_ubound=glc_nec+1)
end if
@@ -534,19 +535,19 @@ subroutine dlnd_comp_export(exportState, rc)
call dshr_export(l2x%rattr(k,:), exportState, "Sl_lfrin", rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
- do n = 1,glc_nec
+ do n = 0,glc_nec
nec_str = glc_elevclass_as_string(n)
k = mct_aVect_indexRA(l2x, "Sl_tsrf" // nec_str)
- call dshr_export(l2x%rattr(k,:), exportState, "Sl_tsrf_elev", ungridded_index=n, rc=rc)
+ call dshr_export(l2x%rattr(k,:), exportState, "Sl_tsrf_elev", ungridded_index=n+1, rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
k = mct_aVect_indexRA(l2x, "Sl_topo" // nec_str)
- call dshr_export(l2x%rattr(k,:), exportState, "Sl_topo_elev", ungridded_index=n, rc=rc)
+ call dshr_export(l2x%rattr(k,:), exportState, "Sl_topo_elev", ungridded_index=n+1, rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
k = mct_aVect_indexRA(l2x, "Flgl_qice" // nec_str)
- call dshr_export(l2x%rattr(k,:), exportState, "Flgl_qice_elev", ungridded_index=n, rc=rc)
+ call dshr_export(l2x%rattr(k,:), exportState, "Flgl_qice_elev", ungridded_index=n+1, rc=rc)
if (ChkErr(rc,__LINE__,u_FILE_u)) return
end do
diff --git a/cime/src/drivers/mct/cime_config/config_component.xml b/cime/src/drivers/mct/cime_config/config_component.xml
index 73cdb63e2b7d..071446b5b572 100644
--- a/cime/src/drivers/mct/cime_config/config_component.xml
+++ b/cime/src/drivers/mct/cime_config/config_component.xml
@@ -1966,7 +1966,7 @@
real
- 1.0e-02
+ 1.0e-01
run_domain
env_run.xml
Error tolerance for differences in ocean/ice lon/lat in domain checking
@@ -2847,18 +2847,27 @@
External script to be run after model completion
+
+ logical
+ TRUE,FALSE
+ FALSE
+ external_tools
+ env_run.xml
+ whether the case uses an external workflow driver
+
+
char
job_submission
- env_batch.xml
+ env_workflow.xml
Store user override for queue
char
job_submission
- env_batch.xml
+ env_workflow.xml
Store user override for walltime
@@ -2867,7 +2876,7 @@
job_submission
- env_batch.xml
+ env_workflow.xml
The machine queue in which to submit the job. Default determined in config_machines.xml can be overwritten by testing
@@ -2876,7 +2885,7 @@
job_submission
- env_batch.xml
+ env_workflow.xml
The machine wallclock setting. Default determined in config_machines.xml can be overwritten by testing
@@ -2885,7 +2894,7 @@
job_submission
- env_batch.xml
+ env_workflow.xml
Override the batch submit command this job. Do not include executable or dependencies
@@ -2893,7 +2902,7 @@
char
job_submission
- env_batch.xml
+ env_workflow.xml
project for project-sensitive build and run paths, and job scripts
@@ -2901,7 +2910,7 @@
char
job_submission
- env_batch.xml
+ env_workflow.xml
project to charge in scripts if different from PROJECT
@@ -2931,7 +2940,6 @@
TRUE=>turn on CPP variable COMPARE_TO_NUOPC
-
=========================================
Notes:
diff --git a/cime/src/drivers/mct/cime_config/config_component_cesm.xml b/cime/src/drivers/mct/cime_config/config_component_cesm.xml
index 8a1a42a86994..e02870d7a8b6 100644
--- a/cime/src/drivers/mct/cime_config/config_component_cesm.xml
+++ b/cime/src/drivers/mct/cime_config/config_component_cesm.xml
@@ -300,6 +300,7 @@
1
$ATM_NCPL
$ATM_NCPL
+ 24
run_coupling
env_run.xml
@@ -381,8 +382,10 @@
FALSE
TRUE
+ TRUE
TRUE
FALSE
+ FALSE
run_component_cpl
env_run.xml
diff --git a/cime/src/share/util/shr_file_mod.F90 b/cime/src/share/util/shr_file_mod.F90
index 8306f51064bc..167d67978cdd 100644
--- a/cime/src/share/util/shr_file_mod.F90
+++ b/cime/src/share/util/shr_file_mod.F90
@@ -61,6 +61,9 @@ MODULE shr_file_mod
public :: shr_file_setLogLevel ! Reset the logging debug level
public :: shr_file_getLogUnit ! Get the log unit number
public :: shr_file_getLogLevel ! Get the logging debug level
+#if defined NEMO_IN_CCSM
+ public :: shr_file_maxUnit ! Max unit number to give
+#endif
! !PUBLIC DATA MEMBERS: