diff --git a/configure_polaris_envs.py b/configure_polaris_envs.py
index fcdc03fdc..85abd955e 100755
--- a/configure_polaris_envs.py
+++ b/configure_polaris_envs.py
@@ -43,7 +43,12 @@ def setup_install_env(env_name, activate_base, use_local, logger, recreate,
         channels = '--use-local'
     else:
         channels = ''
-    packages = f'jinja2 {mache} packaging progressbar2'
+    if mache == '':
+        # development mache, so include its dependencies
+        packages = 'importlib_resources jinja2 lxml packaging progressbar2 ' \
+                   'pyyaml'
+    else:
+        packages = f'jinja2 {mache} packaging progressbar2'
     if recreate or not os.path.exists(env_path):
         print('Setting up a conda environment for installing polaris\n')
         commands = f'{activate_base} && ' \
diff --git a/deploy/default.cfg b/deploy/default.cfg
index 62da4cbde..150c89b84 100644
--- a/deploy/default.cfg
+++ b/deploy/default.cfg
@@ -21,7 +21,7 @@ mpi = nompi
 
 # versions of conda packages
 geometric_features = 1.2.0
-mache = 1.16.0
+mache = 1.19.0
 mpas_tools = 0.27.0
 otps = 2021.10
 parallelio = 2.6.0
diff --git a/deploy/unsupported.txt b/deploy/unsupported.txt
index 4490368e8..e96d64521 100644
--- a/deploy/unsupported.txt
+++ b/deploy/unsupported.txt
@@ -13,11 +13,9 @@ compy, intel, mvapich2
 compy, gnu, openmpi
 compy, pgi, impi
 compy, pgi, mvapich2
-pm-cpu, nvidia, mpich
 pm-cpu, aocc, mpich
 pm-cpu, amdclang, mpich
 
-
 # compiles but tests unreliable (errors or hanging),
 # see https://github.com/MPAS-Dev/compass/issues/336
 anvil, intel, mvapich
diff --git a/docs/developers_guide/machines/index.md b/docs/developers_guide/machines/index.md
index 8f17fd3ee..d85e5f4a0 100644
--- a/docs/developers_guide/machines/index.md
+++ b/docs/developers_guide/machines/index.md
@@ -2,11 +2,11 @@
 
 # Machines
 
-Polaris attempts to be aware of the capabilities of the machine it is running 
-on. This is a particular advantage for so-called "supported" machines with a 
-config file defined for them in the `polaris` package. But even for "unknown" 
-machines, it is not difficult to set a few config options in your user config 
-file to describe your machine. Then, polaris can use this data to make sure 
+Polaris attempts to be aware of the capabilities of the machine it is running
+on. This is a particular advantage for so-called "supported" machines with a
+config file defined for them in the `polaris` package. But even for "unknown"
+machines, it is not difficult to set a few config options in your user config
+file to describe your machine. Then, polaris can use this data to make sure
 test cases are configured in a way that is appropriate for your machine.
 
 (dev-supported-machines)=
@@ -57,6 +57,8 @@ supported for those configurations with `gnu` compilers.
 | compy        | intel      | impi      | intel-mpi         |
 +--------------+------------+-----------+-------------------+
 | pm-cpu       | gnu        | mpich     | gnu-cray          |
+|              +------------+-----------+-------------------+
+|              | intel      | mpich     | intel-cray        |
 +--------------+------------+-----------+-------------------+
 ```
 
@@ -288,8 +290,8 @@ spack:
       modules: []
       environment: {}
       extra_rpaths: []
-``` 
-
+```
+
 Typically your system will already have compilers if nothing else, and this is
 what we assume here. Give the appropriate path (replace `/usr` with the
 appropriate path on your system). We have had better luck with `gcc` than
diff --git a/docs/developers_guide/machines/perlmutter.md b/docs/developers_guide/machines/perlmutter.md
index cca5cb004..56a524efd 100644
--- a/docs/developers_guide/machines/perlmutter.md
+++ b/docs/developers_guide/machines/perlmutter.md
@@ -14,3 +14,18 @@ Then, you can build the MPAS model with
 ```bash
 make [DEBUG=true] gnu-cray
 ```
+
+## pm-cpu, intel
+
+Similarly to `gnu`, for `intel`, if you've set things up right, sourcing the
+load script will look something like:
+
+```bash
+source load_dev_polaris_0.1.0-alpha.1_pm-cpu_intel_mpich.sh
+```
+
+To build MPAS components, use:
+
+```bash
+make [DEBUG=true] intel-cray
+```
diff --git a/polaris/machines/pm-cpu.cfg b/polaris/machines/pm-cpu.cfg
index 744a6ca65..7e526deaf 100644
--- a/polaris/machines/pm-cpu.cfg
+++ b/polaris/machines/pm-cpu.cfg
@@ -21,6 +21,12 @@ software_compiler = gnu
 # the system MPI library to use for gnu compiler
 mpi_gnu = mpich
 
+# the system MPI library to use for intel compiler
+mpi_intel = mpich
+
+# the system MPI library to use for nvidia compiler
+mpi_nvidia = mpich
+
 # the base path for spack environments used by polaris
 spack = /global/cfs/cdirs/e3sm/software/polaris/pm-cpu/spack
 
diff --git a/polaris/machines/pm-gpu.cfg b/polaris/machines/pm-gpu.cfg
new file mode 100644
index 000000000..6cb1a2c91
--- /dev/null
+++ b/polaris/machines/pm-gpu.cfg
@@ -0,0 +1,50 @@
+# The paths section describes paths for data and environments
+[paths]
+
+# A shared root directory where polaris data can be found
+database_root = /global/cfs/cdirs/e3sm/polaris
+
+# the path to the base conda environment where polaris environments have
+# been created
+polaris_envs = /global/common/software/e3sm/polaris/pm-gpu/conda/base
+
+
+# Options related to deploying a polaris conda and spack environments
+[deploy]
+
+# the compiler set to use for system libraries and MPAS builds
+compiler = nvidiagpu
+
+# the compiler to use to build software (e.g. ESMF and MOAB) with spack
+software_compiler = gnu
+
+# the system MPI library to use for gnu compiler
+mpi_gnu = mpich
+
+# the system MPI library to use for gnugpu compiler
+mpi_gnugpu = mpich
+
+# the system MPI library to use for nvidia compiler
+mpi_nvidia = mpich
+
+# the system MPI library to use for nvidiagpu compiler
+mpi_nvidiagpu = mpich
+
+# the base path for spack environments used by polaris
+spack = /global/cfs/cdirs/e3sm/software/polaris/pm-gpu/spack
+
+# whether to use the same modules for hdf5, netcdf-c, netcdf-fortran and
+# pnetcdf as E3SM (spack modules are used otherwise)
+use_e3sm_hdf5_netcdf = True
+
+# The parallel section describes options related to running jobs in parallel.
+# Most options in this section come from mache so here we just add or override
+# some defaults
+[parallel]
+
+# cores per node on the machine
+cores_per_node = 128
+
+# threads per core (set to 1 because trying to hyperthread seems to be causing
+# hanging on perlmutter)
+threads_per_core = 1
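For reference, a minimal standalone sketch of the package-selection logic that the `configure_polaris_envs.py` change introduces. The helper name `select_install_packages` and the example specs are hypothetical; the real `setup_install_env` goes on to build and run the conda commands:

```python
def select_install_packages(mache):
    """Pick conda packages for the environment used to install polaris.

    ``mache`` is expected to be either a pinned spec (e.g. ``'mache=1.19.0'``)
    or an empty string, meaning a development version of mache will be used,
    so its dependencies have to be listed explicitly.
    """
    if mache == '':
        # development mache, so include its dependencies directly
        return ('importlib_resources jinja2 lxml packaging progressbar2 '
                'pyyaml')
    # a released mache package pulls in its own dependencies
    return f'jinja2 {mache} packaging progressbar2'


# hypothetical usage
print(select_install_packages('mache=1.19.0'))
print(select_install_packages(''))
```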
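The new `mpi_intel` and `mpi_nvidia` options in `pm-cpu.cfg` (and their counterparts in the new `pm-gpu.cfg`) follow the existing `mpi_<compiler>` convention in the `[deploy]` section. Below is a hedged sketch of how such an option could be looked up with Python's `configparser`; it is illustrative only and not the actual polaris/mache deployment code:

```python
from configparser import ConfigParser


def mpi_for_compiler(machine_cfg, compiler):
    """Return the system MPI library configured for the given compiler,
    e.g. ``mpi_intel = mpich`` in the ``[deploy]`` section."""
    config = ConfigParser()
    config.read(machine_cfg)
    option = f'mpi_{compiler}'
    if not config.has_option('deploy', option):
        raise ValueError(f'no MPI library defined for compiler {compiler!r}')
    return config.get('deploy', option)


# hypothetical usage against the config file updated in this change
print(mpi_for_compiler('polaris/machines/pm-cpu.cfg', 'intel'))  # mpich
```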