diff --git a/gcm_setup b/gcm_setup
index 16f9c113..767a6ea4 100755
--- a/gcm_setup
+++ b/gcm_setup
@@ -401,50 +401,28 @@ if ( $SITE == 'NCCS' ) then
 
 else if ( $SITE == 'NAS' ) then
 
-   set BUILT_ON_ROME = @BUILT_ON_ROME@
-
-   if ( $BUILT_ON_ROME == "TRUE") then
-
-      echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
-      echo "   ${C2}rom (AMD Rome) (default)${CN}"
-      echo " "
-      echo " NOTE GEOS is non-zero-diff when running on AMD Rome"
-      echo "      compared to the other Intel nodes."
-      echo " "
-      set MODEL = `echo $<`
-      set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
-      if ( .$MODEL == .) then
-         set MODEL = 'rom'
-      endif
-
-      if( $MODEL != 'rom' ) goto ASKPROC
-   else
-
-      echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
-      echo "   ${C2}has (Haswell)${CN}"
-      echo "   ${C2}bro (Broadwell)${CN}"
-      echo "   ${C2}sky (Skylake)${CN} (default)"
-      echo "   ${C2}cas (Cascade Lake)${CN}"
-      echo " "
-      echo " NOTE 1: Due to how FV3 is compiled by default, Sandy Bridge"
-      echo "         and Ivy Bridge are not supported by current GEOS"
-      echo " "
-      echo " NOTE 2: Due to OS differences, if you want to run on the AMD"
-      echo "         Rome nodes at NAS, you must recompile on the Rome nodes"
-      echo " "
-      set MODEL = `echo $<`
-      set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
-      if ( .$MODEL == .) then
-         set MODEL = 'sky'
-      endif
-
-      if( $MODEL != 'has' & \
-          $MODEL != 'bro' & \
-          $MODEL != 'sky' & \
-          $MODEL != 'cas' ) goto ASKPROC
-
+   echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
+   echo "   ${C2}has (Haswell)${CN}"
+   echo "   ${C2}bro (Broadwell)${CN}"
+   echo "   ${C2}sky (Skylake)${CN} (default)"
+   echo "   ${C2}cas (Cascade Lake)${CN}"
+   echo "   ${C2}rom (AMD Rome)${CN}"
+   echo " "
+   echo " NOTE Due to how FV3 is compiled by default, Sandy Bridge"
+   echo "      and Ivy Bridge are not supported by current GEOS"
+   echo " "
+   set MODEL = `echo $<`
+   set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
+   if ( .$MODEL == .) then
+      set MODEL = 'sky'
    endif
 
+   if( $MODEL != 'has' & \
+       $MODEL != 'bro' & \
+       $MODEL != 'sky' & \
+       $MODEL != 'cas' & \
+       $MODEL != 'rom' ) goto ASKPROC
+
    # Some processors have weird names at NAS
    # ---------------------------------------
 
@@ -472,8 +450,43 @@ else if ( $SITE == 'NAS' ) then
       set NCPUS_PER_NODE = 128
    endif
 
+else if( $SITE == 'AWS' | $SITE == 'Azure' ) then
+
+   # Because we do not know the name of the model or the number of CPUs
+   # per node, we ask the user to set these variables in the script
+
+   # AWS and Azure users must set the MODEL and NCPUS_PER_NODE
+   set MODEL = USER_MUST_SET
+   set NCPUS_PER_NODE = USER_MUST_SET
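+
+   # For example, on a hypothetical cluster of AWS c5n.18xlarge nodes
+   # (36 physical cores each) one might use:
+   #    set MODEL = c5n
+   #    set NCPUS_PER_NODE = 36
+   # These values are illustrative only; use the node type and core
+   # count of your own cluster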
+
+   # Above we require the user to set the MODEL and NCPUS_PER_NODE
+   # variables. Here we check that they have been set. If not, we
+   # print instructions for the user and exit
+   # --------------------------------------------------------
+
+   if ( $MODEL == USER_MUST_SET | $NCPUS_PER_NODE == USER_MUST_SET ) then
+      echo "ERROR: We have detected you are on $SITE. As we do not have"
+      echo "       official fixed node info yet, we ask you to edit $0"
+      echo "       and set the MODEL and NCPUS_PER_NODE variables."
+      echo "       Look for the section that says:"
+      echo " "
+      echo "       # AWS and Azure users must set the MODEL and NCPUS_PER_NODE"
+      exit 1
+   endif
+
 else
    set MODEL = 'UNKNOWN'
+   # As we do not know how many CPUs per node this machine has, we
+   # detect it from the total CPU count, which is queried differently
+   # on Linux and macOS
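+   # Note that both commands below report logical CPUs, so on machines
+   # with SMT/hyperthreading enabled the count includes hardware threads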
+   if ( $ARCH == 'Linux' ) then
+      set NCPUS_PER_NODE = `grep -c ^processor /proc/cpuinfo`
+   else if ( $ARCH == 'Darwin' ) then
+      set NCPUS_PER_NODE = `sysctl -n hw.ncpu`
+   else
+      echo "ERROR: Unknown architecture $ARCH"
+      exit 1
+   endif
 endif
 
 #######################################################################
@@ -1734,30 +1747,30 @@ else if( $SITE == 'NCCS' ) then
               if ( "$OCNMODEL" == "MIT" ) then
                 setenv COUPLEDIR  /gpfsm/dnb32/estrobac/geos5/GRIDDIR                  # Coupled Ocean/Atmos Forcing
               endif
-else if( $SITE == 'AWS' ) then
-              setenv BATCH_CMD "sbatch"                                                # SLURM Batch command
-              setenv BATCH_GROUP DELETE                                                # SLURM Syntax for account name
-              setenv BATCH_TIME "SBATCH --time="                                       # SLURM Syntax for walltime
-              setenv BATCH_JOBNAME "SBATCH --job-name="                                # SLURM Syntax for job name
-              setenv BATCH_OUTPUTNAME "SBATCH --output="                               # SLURM Syntax for job output name
-              setenv BATCH_JOINOUTERR "DELETE"                                         # SLURM joins out and err by default
-              setenv     RUN_FT "06:00:00"                                             # Wallclock Time   for gcm_forecast.j
-              setenv     RUN_T  "12:00:00"                                             # Wallclock Time   for gcm_run.j
-              setenv    POST_T  "8:00:00"                                              # Wallclock Time   for gcm_post.j
-              setenv    PLOT_T  "12:00:00"                                             # Wallclock Time   for gcm_plot.j
-              setenv ARCHIVE_T  "1:00:00"                                              # Wallclock Time   for gcm_archive.j
-              setenv  RUN_Q     DELETE                                                 # batch queue name for gcm_run.j
-              setenv  RUN_P   "SBATCH --ntasks=${MODEL_NPES}"                          # PE Configuration for gcm_run.j
-              setenv  RUN_FP  "SBATCH --ntasks=${MODEL_NPES}"                          # PE Configuration for gcm_forecast.j
-              setenv    POST_Q  NULL                                                   # batch queue name for gcm_post.j
-              setenv    PLOT_Q  NULL                                                   # batch queue name for gcm_plot.j
-              setenv    MOVE_Q  NULL                                                   # batch queue name for gcm_moveplot.j
-              setenv ARCHIVE_Q  NULL                                                   # batch queue name for gcm_archive.j
-              setenv    POST_P  "SBATCH --ntasks=${POST_NPES}"                         # PE Configuration for gcm_post.j
-              setenv    PLOT_P  "SBATCH --nodes=4 --ntasks=4"                          # PE Configuration for gcm_plot.j
-              setenv ARCHIVE_P  "SBATCH --ntasks=1"                                    # PE Configuration for gcm_archive.j
-              setenv CONVERT_P  "SBATCH --ntasks=${CNV_NPES}"                          # PE Configuration for gcm_convert.j
-              setenv    MOVE_P  "SBATCH --ntasks=1"                                    # PE Configuration for gcm_moveplot.j
+else if( $SITE == 'AWS' | $SITE == 'Azure' ) then
+              setenv BATCH_CMD "sbatch"                                                    # SLURM Batch command
+              setenv BATCH_GROUP DELETE                                                    # SLURM Syntax for account name
+              setenv BATCH_TIME "SBATCH --time="                                           # SLURM Syntax for walltime
+              setenv BATCH_JOBNAME "SBATCH --job-name="                                    # SLURM Syntax for job name
+              setenv BATCH_OUTPUTNAME "SBATCH --output="                                   # SLURM Syntax for job output name
+              setenv BATCH_JOINOUTERR "DELETE"                                             # SLURM joins out and err by default
+              setenv RUN_FT "06:00:00"                                                     # Wallclock Time   for gcm_forecast.j
+              setenv RUN_T  "12:00:00"                                                     # Wallclock Time   for gcm_run.j
+              setenv POST_T  "8:00:00"                                                     # Wallclock Time   for gcm_post.j
+              setenv PLOT_T  "12:00:00"                                                    # Wallclock Time   for gcm_plot.j
+              setenv ARCHIVE_T  "1:00:00"                                                  # Wallclock Time   for gcm_archive.j
+              setenv RUN_Q     "SBATCH --constraint=${MODEL}"                              # node constraint  for gcm_run.j
+              setenv RUN_P   "SBATCH --nodes=${NODES} --ntasks-per-node=${NCPUS_PER_NODE}" # PE Configuration for gcm_run.j
+              setenv RUN_FP  "SBATCH --nodes=${NODES} --ntasks-per-node=${NCPUS_PER_NODE}" # PE Configuration for gcm_forecast.j
+              setenv POST_Q  NULL                                                          # batch queue name for gcm_post.j
+              setenv PLOT_Q  NULL                                                          # batch queue name for gcm_plot.j
+              setenv MOVE_Q  NULL                                                          # batch queue name for gcm_moveplot.j
+              setenv ARCHIVE_Q  NULL                                                       # batch queue name for gcm_archive.j
+              setenv POST_P  "SBATCH --ntasks=${POST_NPES}"                                # PE Configuration for gcm_post.j
+              setenv PLOT_P  "SBATCH --nodes=4 --ntasks=4"                                 # PE Configuration for gcm_plot.j
+              setenv ARCHIVE_P  "SBATCH --ntasks=1"                                        # PE Configuration for gcm_archive.j
+              setenv CONVERT_P  "SBATCH --ntasks=${CNV_NPES}"                              # PE Configuration for gcm_convert.j
+              setenv MOVE_P  "SBATCH --ntasks=1"                                           # PE Configuration for gcm_moveplot.j
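+
+              # Note: RUN_Q above uses SBATCH --constraint, which selects nodes
+              # advertising a matching feature tag in the SLURM configuration;
+              # the MODEL value set earlier must match a feature on your nodes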
 
               setenv BCSDIR     /ford1/share/gmao_SIteam/ModelData/bcs/${LSM_BCS}/${LSM_BCS}_${OCEAN_TAG}  # location of Boundary Conditions
               setenv REPLAY_ANA_EXPID    REPLAY_UNSUPPORTED                                                # Default Analysis Experiment for REPLAY
@@ -1770,11 +1783,6 @@ else if( $SITE == 'AWS' ) then
               setenv COUPLEDIR  /ford1/share/gmao_SIteam/ModelData/aogcm               # Coupled Ocean/Atmos Forcing
               setenv GWDRSDIR   /ford1/share/gmao_SIteam/ModelData/GWD_RIDGE           # Location of GWD_RIDGE files
 
-              # By default on AWS, just ignore IOSERVER for now until testing
-              set USE_IOSERVER = 0
-              set NUM_OSERVER_NODES = 0
-              set NUM_BACKEND_PES = 0
-              set NCPUS_PER_NODE = 0
 else
 # These are defaults for the desktop
               setenv BATCH_CMD "sbatch"                                                # SLURM Batch command
@@ -1819,7 +1827,6 @@ else
               set USE_IOSERVER = 0
               set NUM_OSERVER_NODES = 0
               set NUM_BACKEND_PES = 0
-              set NCPUS_PER_NODE = 0
 endif
 
 #######################################################################
diff --git a/geoschemchem_setup b/geoschemchem_setup
index e996fa8c..5f0ac3c3 100755
--- a/geoschemchem_setup
+++ b/geoschemchem_setup
@@ -401,50 +401,28 @@ if ( $SITE == 'NCCS' ) then
 
 else if ( $SITE == 'NAS' ) then
 
-   set BUILT_ON_ROME = @BUILT_ON_ROME@
-
-   if ( $BUILT_ON_ROME == "TRUE") then
-
-      echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
-      echo "   ${C2}rom (AMD Rome) (default)${CN}"
-      echo " "
-      echo " NOTE GEOS is non-zero-diff when running on AMD Rome"
-      echo "      compared to the other Intel nodes."
-      echo " "
-      set MODEL = `echo $<`
-      set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
-      if ( .$MODEL == .) then
-         set MODEL = 'rom'
-      endif
-
-      if( $MODEL != 'rom' ) goto ASKPROC
-   else
-
-      echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
-      echo "   ${C2}has (Haswell)${CN}"
-      echo "   ${C2}bro (Broadwell)${CN}"
-      echo "   ${C2}sky (Skylake)${CN} (default)"
-      echo "   ${C2}cas (Cascade Lake)${CN}"
-      echo " "
-      echo " NOTE 1: Due to how FV3 is compiled by default, Sandy Bridge"
-      echo "         and Ivy Bridge are not supported by current GEOS"
-      echo " "
-      echo " NOTE 2: Due to OS differences, if you want to run on the AMD"
-      echo "         Rome nodes at NAS, you must recompile on the Rome nodes"
-      echo " "
-      set MODEL = `echo $<`
-      set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
-      if ( .$MODEL == .) then
-         set MODEL = 'sky'
-      endif
-
-      if( $MODEL != 'has' & \
-          $MODEL != 'bro' & \
-          $MODEL != 'sky' & \
-          $MODEL != 'cas' ) goto ASKPROC
-
+   echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
+   echo "   ${C2}has (Haswell)${CN}"
+   echo "   ${C2}bro (Broadwell)${CN}"
+   echo "   ${C2}sky (Skylake)${CN} (default)"
+   echo "   ${C2}cas (Cascade Lake)${CN}"
+   echo "   ${C2}rom (AMD Rome)${CN}"
+   echo " "
+   echo " NOTE Due to how FV3 is compiled by default, Sandy Bridge"
+   echo "      and Ivy Bridge are not supported by current GEOS"
+   echo " "
+   set MODEL = `echo $<`
+   set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
+   if ( .$MODEL == .) then
+      set MODEL = 'sky'
    endif
 
+   if( $MODEL != 'has' & \
+       $MODEL != 'bro' & \
+       $MODEL != 'sky' & \
+       $MODEL != 'cas' & \
+       $MODEL != 'rom' ) goto ASKPROC
+
    # Some processors have weird names at NAS
    # ---------------------------------------
 
@@ -472,8 +450,43 @@ else if ( $SITE == 'NAS' ) then
       set NCPUS_PER_NODE = 128
    endif
 
+else if( $SITE == 'AWS' | $SITE == 'Azure' ) then
+
+   # Because we do not know the name of the model or the number of CPUs
+   # per node, we ask the user to set these variables in the script
+
+   # AWS and Azure users must set the MODEL and NCPUS_PER_NODE
+   set MODEL = USER_MUST_SET
+   set NCPUS_PER_NODE = USER_MUST_SET
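+
+   # For example, on a hypothetical cluster of AWS c5n.18xlarge nodes
+   # (36 physical cores each) one might use:
+   #    set MODEL = c5n
+   #    set NCPUS_PER_NODE = 36
+   # These values are illustrative only; use the node type and core
+   # count of your own cluster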
+
+   # Above we require the user to set the MODEL and NCPUS_PER_NODE
+   # variables. Here we check that they have been set. If not, we
+   # print instructions for the user and exit
+   # --------------------------------------------------------
+
+   if ( $MODEL == USER_MUST_SET | $NCPUS_PER_NODE == USER_MUST_SET ) then
+      echo "ERROR: We have detected you are on $SITE. As we do not have"
+      echo "       official fixed node info yet, we ask you to edit $0"
+      echo "       and set the MODEL and NCPUS_PER_NODE variables."
+      echo "       Look for the section that says:"
+      echo " "
+      echo "       # AWS and Azure users must set the MODEL and NCPUS_PER_NODE"
+      exit 1
+   endif
+
 else
    set MODEL = 'UNKNOWN'
+   # As we do not know how many CPUs per node this machine has, we
+   # detect it from the total CPU count, which is queried differently
+   # on Linux and macOS
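+   # Note that both commands below report logical CPUs, so on machines
+   # with SMT/hyperthreading enabled the count includes hardware threads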
+   if ( $ARCH == 'Linux' ) then
+      set NCPUS_PER_NODE = `grep -c ^processor /proc/cpuinfo`
+   else if ( $ARCH == 'Darwin' ) then
+      set NCPUS_PER_NODE = `sysctl -n hw.ncpu`
+   else
+      echo "ERROR: Unknown architecture $ARCH"
+      exit 1
+   endif
 endif
 
 #######################################################################
@@ -1764,30 +1777,30 @@ else if( $SITE == 'NCCS' ) then
               if ( "$OCNMODEL" == "MIT" ) then
                 setenv COUPLEDIR  /gpfsm/dnb32/estrobac/geos5/GRIDDIR                  # Coupled Ocean/Atmos Forcing
               endif
-else if( $SITE == 'AWS' ) then
-              setenv BATCH_CMD "sbatch"                                                # SLURM Batch command
-              setenv BATCH_GROUP DELETE                                                # SLURM Syntax for account name
-              setenv BATCH_TIME "SBATCH --time="                                       # SLURM Syntax for walltime
-              setenv BATCH_JOBNAME "SBATCH --job-name="                                # SLURM Syntax for job name
-              setenv BATCH_OUTPUTNAME "SBATCH --output="                               # SLURM Syntax for job output name
-              setenv BATCH_JOINOUTERR "DELETE"                                         # SLURM joins out and err by default
-              setenv     RUN_FT "06:00:00"                                             # Wallclock Time   for gcm_forecast.j
-              setenv     RUN_T  "12:00:00"                                             # Wallclock Time   for gcm_run.j
-              setenv    POST_T  "8:00:00"                                              # Wallclock Time   for gcm_post.j
-              setenv    PLOT_T  "12:00:00"                                             # Wallclock Time   for gcm_plot.j
-              setenv ARCHIVE_T  "1:00:00"                                              # Wallclock Time   for gcm_archive.j
-              setenv  RUN_Q     DELETE                                                 # batch queue name for gcm_run.j
-              setenv  RUN_P   "SBATCH --ntasks=${MODEL_NPES}"                          # PE Configuration for gcm_run.j
-              setenv  RUN_FP  "SBATCH --ntasks=${MODEL_NPES}"                          # PE Configuration for gcm_forecast.j
-              setenv    POST_Q  NULL                                                   # batch queue name for gcm_post.j
-              setenv    PLOT_Q  NULL                                                   # batch queue name for gcm_plot.j
-              setenv    MOVE_Q  NULL                                                   # batch queue name for gcm_moveplot.j
-              setenv ARCHIVE_Q  NULL                                                   # batch queue name for gcm_archive.j
-              setenv    POST_P  "SBATCH --ntasks=${POST_NPES}"                         # PE Configuration for gcm_post.j
-              setenv    PLOT_P  "SBATCH --nodes=4 --ntasks=4"                          # PE Configuration for gcm_plot.j
-              setenv ARCHIVE_P  "SBATCH --ntasks=1"                                    # PE Configuration for gcm_archive.j
-              setenv CONVERT_P  "SBATCH --ntasks=${CNV_NPES}"                          # PE Configuration for gcm_convert.j
-              setenv    MOVE_P  "SBATCH --ntasks=1"                                    # PE Configuration for gcm_moveplot.j
+else if( $SITE == 'AWS' | $SITE == 'Azure' ) then
+              setenv BATCH_CMD "sbatch"                                                    # SLURM Batch command
+              setenv BATCH_GROUP DELETE                                                    # SLURM Syntax for account name
+              setenv BATCH_TIME "SBATCH --time="                                           # SLURM Syntax for walltime
+              setenv BATCH_JOBNAME "SBATCH --job-name="                                    # SLURM Syntax for job name
+              setenv BATCH_OUTPUTNAME "SBATCH --output="                                   # SLURM Syntax for job output name
+              setenv BATCH_JOINOUTERR "DELETE"                                             # SLURM joins out and err by default
+              setenv RUN_FT "06:00:00"                                                     # Wallclock Time   for gcm_forecast.j
+              setenv RUN_T  "12:00:00"                                                     # Wallclock Time   for gcm_run.j
+              setenv POST_T  "8:00:00"                                                     # Wallclock Time   for gcm_post.j
+              setenv PLOT_T  "12:00:00"                                                    # Wallclock Time   for gcm_plot.j
+              setenv ARCHIVE_T  "1:00:00"                                                  # Wallclock Time   for gcm_archive.j
+              setenv RUN_Q     "SBATCH --constraint=${MODEL}"                              # node constraint  for gcm_run.j
+              setenv RUN_P   "SBATCH --nodes=${NODES} --ntasks-per-node=${NCPUS_PER_NODE}" # PE Configuration for gcm_run.j
+              setenv RUN_FP  "SBATCH --nodes=${NODES} --ntasks-per-node=${NCPUS_PER_NODE}" # PE Configuration for gcm_forecast.j
+              setenv POST_Q  NULL                                                          # batch queue name for gcm_post.j
+              setenv PLOT_Q  NULL                                                          # batch queue name for gcm_plot.j
+              setenv MOVE_Q  NULL                                                          # batch queue name for gcm_moveplot.j
+              setenv ARCHIVE_Q  NULL                                                       # batch queue name for gcm_archive.j
+              setenv POST_P  "SBATCH --ntasks=${POST_NPES}"                                # PE Configuration for gcm_post.j
+              setenv PLOT_P  "SBATCH --nodes=4 --ntasks=4"                                 # PE Configuration for gcm_plot.j
+              setenv ARCHIVE_P  "SBATCH --ntasks=1"                                        # PE Configuration for gcm_archive.j
+              setenv CONVERT_P  "SBATCH --ntasks=${CNV_NPES}"                              # PE Configuration for gcm_convert.j
+              setenv MOVE_P  "SBATCH --ntasks=1"                                           # PE Configuration for gcm_moveplot.j
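+
+              # Note: RUN_Q above uses SBATCH --constraint, which selects nodes
+              # advertising a matching feature tag in the SLURM configuration;
+              # the MODEL value set earlier must match a feature on your nodes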
 
               setenv BCSDIR     /ford1/share/gmao_SIteam/ModelData/bcs/${LSM_BCS}/${LSM_BCS}_${OCEAN_TAG}  # location of Boundary Conditions
               setenv REPLAY_ANA_EXPID    REPLAY_UNSUPPORTED                                                # Default Analysis Experiment for REPLAY
@@ -1800,11 +1813,6 @@ else if( $SITE == 'AWS' ) then
               setenv COUPLEDIR  /ford1/share/gmao_SIteam/ModelData/aogcm               # Coupled Ocean/Atmos Forcing
               setenv GWDRSDIR   /ford1/share/gmao_SIteam/ModelData/GWD_RIDGE           # Location of GWD_RIDGE files
 
-              # By default on AWS, just ignore IOSERVER for now until testing
-              set USE_IOSERVER = 0
-              set NUM_OSERVER_NODES = 0
-              set NUM_BACKEND_PES = 0
-              set NCPUS_PER_NODE = 0
 else
 # These are defaults for the desktop
               setenv BATCH_CMD "sbatch"                                                # SLURM Batch command
@@ -1849,7 +1857,6 @@ else
               set USE_IOSERVER = 0
               set NUM_OSERVER_NODES = 0
               set NUM_BACKEND_PES = 0
-              set NCPUS_PER_NODE = 0
 endif
 
 #######################################################################
diff --git a/gmichem_setup b/gmichem_setup
index e270828d..96f624fe 100755
--- a/gmichem_setup
+++ b/gmichem_setup
@@ -401,50 +401,28 @@ if ( $SITE == 'NCCS' ) then
 
 else if ( $SITE == 'NAS' ) then
 
-   set BUILT_ON_ROME = @BUILT_ON_ROME@
-
-   if ( $BUILT_ON_ROME == "TRUE") then
-
-      echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
-      echo "   ${C2}rom (AMD Rome) (default)${CN}"
-      echo " "
-      echo " NOTE GEOS is non-zero-diff when running on AMD Rome"
-      echo "      compared to the other Intel nodes."
-      echo " "
-      set MODEL = `echo $<`
-      set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
-      if ( .$MODEL == .) then
-         set MODEL = 'rom'
-      endif
-
-      if( $MODEL != 'rom' ) goto ASKPROC
-   else
-
-      echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
-      echo "   ${C2}has (Haswell)${CN}"
-      echo "   ${C2}bro (Broadwell)${CN}"
-      echo "   ${C2}sky (Skylake)${CN} (default)"
-      echo "   ${C2}cas (Cascade Lake)${CN}"
-      echo " "
-      echo " NOTE 1: Due to how FV3 is compiled by default, Sandy Bridge"
-      echo "         and Ivy Bridge are not supported by current GEOS"
-      echo " "
-      echo " NOTE 2: Due to OS differences, if you want to run on the AMD"
-      echo "         Rome nodes at NAS, you must recompile on the Rome nodes"
-      echo " "
-      set MODEL = `echo $<`
-      set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
-      if ( .$MODEL == .) then
-         set MODEL = 'sky'
-      endif
-
-      if( $MODEL != 'has' & \
-          $MODEL != 'bro' & \
-          $MODEL != 'sky' & \
-          $MODEL != 'cas' ) goto ASKPROC
-
+   echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
+   echo "   ${C2}has (Haswell)${CN}"
+   echo "   ${C2}bro (Broadwell)${CN}"
+   echo "   ${C2}sky (Skylake)${CN} (default)"
+   echo "   ${C2}cas (Cascade Lake)${CN}"
+   echo "   ${C2}rom (AMD Rome)${CN}"
+   echo " "
+   echo " NOTE Due to how FV3 is compiled by default, Sandy Bridge"
+   echo "      and Ivy Bridge are not supported by current GEOS"
+   echo " "
+   set MODEL = `echo $<`
+   set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
+   if ( .$MODEL == .) then
+      set MODEL = 'sky'
    endif
 
+   if( $MODEL != 'has' & \
+       $MODEL != 'bro' & \
+       $MODEL != 'sky' & \
+       $MODEL != 'cas' & \
+       $MODEL != 'rom' ) goto ASKPROC
+
    # Some processors have weird names at NAS
    # ---------------------------------------
 
@@ -472,8 +450,43 @@ else if ( $SITE == 'NAS' ) then
       set NCPUS_PER_NODE = 128
    endif
 
+else if( $SITE == 'AWS' | $SITE == 'Azure' ) then
+
+   # Because we do not know the name of the model or the number of CPUs
+   # per node, we ask the user to set these variables in the script
+
+   # AWS and Azure users must set the MODEL and NCPUS_PER_NODE
+   set MODEL = USER_MUST_SET
+   set NCPUS_PER_NODE = USER_MUST_SET
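+
+   # For example, on a hypothetical cluster of AWS c5n.18xlarge nodes
+   # (36 physical cores each) one might use:
+   #    set MODEL = c5n
+   #    set NCPUS_PER_NODE = 36
+   # These values are illustrative only; use the node type and core
+   # count of your own cluster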
+
+   # Above we require the user to set the MODEL and NCPUS_PER_NODE
+   # variables. Here we check that they have been set. If not, we
+   # print instructions for the user and exit
+   # --------------------------------------------------------
+
+   if ( $MODEL == USER_MUST_SET | $NCPUS_PER_NODE == USER_MUST_SET ) then
+      echo "ERROR: We have detected you are on $SITE. As we do not have"
+      echo "       official fixed node info yet, we ask you to edit $0"
+      echo "       and set the MODEL and NCPUS_PER_NODE variables."
+      echo "       Look for the section that says:"
+      echo " "
+      echo "       # AWS and Azure users must set the MODEL and NCPUS_PER_NODE"
+      exit 1
+   endif
+
 else
    set MODEL = 'UNKNOWN'
+   # As we do not know how many CPUs per node this machine has, we
+   # detect it from the total CPU count, which is queried differently
+   # on Linux and macOS
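+   # Note that both commands below report logical CPUs, so on machines
+   # with SMT/hyperthreading enabled the count includes hardware threads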
+   if ( $ARCH == 'Linux' ) then
+      set NCPUS_PER_NODE = `grep -c ^processor /proc/cpuinfo`
+   else if ( $ARCH == 'Darwin' ) then
+      set NCPUS_PER_NODE = `sysctl -n hw.ncpu`
+   else
+      echo "ERROR: Unknown architecture $ARCH"
+      exit 1
+   endif
 endif
 
 # Per previous gmichem_setup, reduce cores-per-node by 1
@@ -1319,7 +1332,7 @@ if( $AGCM_IM == "c1536" ) then
      set GMICHEM_DT = `expr $DT \* 2`
      set AGCM_IM  = 1536
      set AGCM_JM  = `expr $AGCM_IM \* 6`
-     set       NX = 96 
+     set       NX = 96
      set       NY = `expr $NX \* 6`
      set HYDROSTATIC = $USE_HYDROSTATIC
      set HIST_IM  = `expr $AGCM_IM \* 4`
@@ -1925,30 +1938,30 @@ else if( $SITE == 'NCCS' ) then
               if ( "$OCNMODEL" == "MIT" ) then
                 setenv COUPLEDIR  /gpfsm/dnb32/estrobac/geos5/GRIDDIR                  # Coupled Ocean/Atmos Forcing
               endif
-else if( $SITE == 'AWS' ) then
-              setenv BATCH_CMD "sbatch"                                                # SLURM Batch command
-              setenv BATCH_GROUP DELETE                                                # SLURM Syntax for account name
-              setenv BATCH_TIME "SBATCH --time="                                       # SLURM Syntax for walltime
-              setenv BATCH_JOBNAME "SBATCH --job-name="                                # SLURM Syntax for job name
-              setenv BATCH_OUTPUTNAME "SBATCH --output="                               # SLURM Syntax for job output name
-              setenv BATCH_JOINOUTERR "DELETE"                                         # SLURM joins out and err by default
-              setenv     RUN_FT "06:00:00"                                             # Wallclock Time   for gcm_forecast.j
-              setenv     RUN_T  "12:00:00"                                             # Wallclock Time   for gcm_run.j
-              setenv    POST_T  "8:00:00"                                              # Wallclock Time   for gcm_post.j
-              setenv    PLOT_T  "12:00:00"                                             # Wallclock Time   for gcm_plot.j
-              setenv ARCHIVE_T  "1:00:00"                                              # Wallclock Time   for gcm_archive.j
-              setenv  RUN_Q     DELETE                                                 # batch queue name for gcm_run.j
-              setenv  RUN_P   "SBATCH --ntasks=${MODEL_NPES}"                          # PE Configuration for gcm_run.j
-              setenv  RUN_FP  "SBATCH --ntasks=${MODEL_NPES}"                          # PE Configuration for gcm_forecast.j
-              setenv    POST_Q  NULL                                                   # batch queue name for gcm_post.j
-              setenv    PLOT_Q  NULL                                                   # batch queue name for gcm_plot.j
-              setenv    MOVE_Q  NULL                                                   # batch queue name for gcm_moveplot.j
-              setenv ARCHIVE_Q  NULL                                                   # batch queue name for gcm_archive.j
-              setenv    POST_P  "SBATCH --ntasks=${POST_NPES}"                         # PE Configuration for gcm_post.j
-              setenv    PLOT_P  "SBATCH --nodes=4 --ntasks=4"                          # PE Configuration for gcm_plot.j
-              setenv ARCHIVE_P  "SBATCH --ntasks=1"                                    # PE Configuration for gcm_archive.j
-              setenv CONVERT_P  "SBATCH --ntasks=${CNV_NPES}"                          # PE Configuration for gcm_convert.j
-              setenv    MOVE_P  "SBATCH --ntasks=1"                                    # PE Configuration for gcm_moveplot.j
+else if( $SITE == 'AWS' | $SITE == 'Azure' ) then
+              setenv BATCH_CMD "sbatch"                                                    # SLURM Batch command
+              setenv BATCH_GROUP DELETE                                                    # SLURM Syntax for account name
+              setenv BATCH_TIME "SBATCH --time="                                           # SLURM Syntax for walltime
+              setenv BATCH_JOBNAME "SBATCH --job-name="                                    # SLURM Syntax for job name
+              setenv BATCH_OUTPUTNAME "SBATCH --output="                                   # SLURM Syntax for job output name
+              setenv BATCH_JOINOUTERR "DELETE"                                             # SLURM joins out and err by default
+              setenv RUN_FT "06:00:00"                                                     # Wallclock Time   for gcm_forecast.j
+              setenv RUN_T  "12:00:00"                                                     # Wallclock Time   for gcm_run.j
+              setenv POST_T  "8:00:00"                                                     # Wallclock Time   for gcm_post.j
+              setenv PLOT_T  "12:00:00"                                                    # Wallclock Time   for gcm_plot.j
+              setenv ARCHIVE_T  "1:00:00"                                                  # Wallclock Time   for gcm_archive.j
+              setenv RUN_Q     "SBATCH --constraint=${MODEL}"                              # node constraint  for gcm_run.j
+              setenv RUN_P   "SBATCH --nodes=${NODES} --ntasks-per-node=${NCPUS_PER_NODE}" # PE Configuration for gcm_run.j
+              setenv RUN_FP  "SBATCH --nodes=${NODES} --ntasks-per-node=${NCPUS_PER_NODE}" # PE Configuration for gcm_forecast.j
+              setenv POST_Q  NULL                                                          # batch queue name for gcm_post.j
+              setenv PLOT_Q  NULL                                                          # batch queue name for gcm_plot.j
+              setenv MOVE_Q  NULL                                                          # batch queue name for gcm_moveplot.j
+              setenv ARCHIVE_Q  NULL                                                       # batch queue name for gcm_archive.j
+              setenv POST_P  "SBATCH --ntasks=${POST_NPES}"                                # PE Configuration for gcm_post.j
+              setenv PLOT_P  "SBATCH --nodes=4 --ntasks=4"                                 # PE Configuration for gcm_plot.j
+              setenv ARCHIVE_P  "SBATCH --ntasks=1"                                        # PE Configuration for gcm_archive.j
+              setenv CONVERT_P  "SBATCH --ntasks=${CNV_NPES}"                              # PE Configuration for gcm_convert.j
+              setenv MOVE_P  "SBATCH --ntasks=1"                                           # PE Configuration for gcm_moveplot.j
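+
+              # Note: RUN_Q above uses SBATCH --constraint, which selects nodes
+              # advertising a matching feature tag in the SLURM configuration;
+              # the MODEL value set earlier must match a feature on your nodes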
 
               setenv BCSDIR     /ford1/share/gmao_SIteam/ModelData/bcs/${LSM_BCS}/${LSM_BCS}_${OCEAN_TAG}  # location of Boundary Conditions
               setenv REPLAY_ANA_EXPID    REPLAY_UNSUPPORTED                                                # Default Analysis Experiment for REPLAY
@@ -1961,11 +1974,6 @@ else if( $SITE == 'AWS' ) then
               setenv COUPLEDIR  /ford1/share/gmao_SIteam/ModelData/aogcm               # Coupled Ocean/Atmos Forcing
               setenv GWDRSDIR   /ford1/share/gmao_SIteam/ModelData/GWD_RIDGE           # Location of GWD_RIDGE files
 
-              # By default on AWS, just ignore IOSERVER for now until testing
-              set USE_IOSERVER = 0
-              set NUM_OSERVER_NODES = 0
-              set NUM_BACKEND_PES = 0
-              set NCPUS_PER_NODE = 0
 else
 # These are defaults for the desktop
               setenv BATCH_CMD "sbatch"                                                # SLURM Batch command
@@ -2010,7 +2018,6 @@ else
               set USE_IOSERVER = 0
               set NUM_OSERVER_NODES = 0
               set NUM_BACKEND_PES = 0
-              set NCPUS_PER_NODE = 0
 endif
 
 # If SST and SEAICE files are not in the standard location:
@@ -3000,7 +3007,7 @@ if( $LGOCART2G == TRUE ) then
               -e '/ACTIVE_INSTANCES_CA:/ s/CA.oc /CA.oc.data /' \
               -e '/ACTIVE_INSTANCES_CA:/ s/CA.bc /CA.bc.data /' \
               -e '/ACTIVE_INSTANCES_CA:/ s/CA.br /CA.br.data /' > $EXPDIR/RC/GOCART2G_GridComp.rc
-    endif 
+    endif
 
     /bin/mv $EXPDIR/RC/GEOS_ChemGridComp.rc $EXPDIR/RC/GEOS_ChemGridComp.tmp
     cat $EXPDIR/RC/GEOS_ChemGridComp.tmp | \
diff --git a/stratchem_setup b/stratchem_setup
index c5dc997b..0f0c0644 100755
--- a/stratchem_setup
+++ b/stratchem_setup
@@ -401,50 +401,28 @@ if ( $SITE == 'NCCS' ) then
 
 else if ( $SITE == 'NAS' ) then
 
-   set BUILT_ON_ROME = @BUILT_ON_ROME@
-
-   if ( $BUILT_ON_ROME == "TRUE") then
-
-      echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
-      echo "   ${C2}rom (AMD Rome) (default)${CN}"
-      echo " "
-      echo " NOTE GEOS is non-zero-diff when running on AMD Rome"
-      echo "      compared to the other Intel nodes."
-      echo " "
-      set MODEL = `echo $<`
-      set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
-      if ( .$MODEL == .) then
-         set MODEL = 'rom'
-      endif
-
-      if( $MODEL != 'rom' ) goto ASKPROC
-   else
-
-      echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
-      echo "   ${C2}has (Haswell)${CN}"
-      echo "   ${C2}bro (Broadwell)${CN}"
-      echo "   ${C2}sky (Skylake)${CN} (default)"
-      echo "   ${C2}cas (Cascade Lake)${CN}"
-      echo " "
-      echo " NOTE 1: Due to how FV3 is compiled by default, Sandy Bridge"
-      echo "         and Ivy Bridge are not supported by current GEOS"
-      echo " "
-      echo " NOTE 2: Due to OS differences, if you want to run on the AMD"
-      echo "         Rome nodes at NAS, you must recompile on the Rome nodes"
-      echo " "
-      set MODEL = `echo $<`
-      set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
-      if ( .$MODEL == .) then
-         set MODEL = 'sky'
-      endif
-
-      if( $MODEL != 'has' & \
-          $MODEL != 'bro' & \
-          $MODEL != 'sky' & \
-          $MODEL != 'cas' ) goto ASKPROC
-
+   echo "Enter the ${C1}Processor Type${CN} you wish to run on:"
+   echo "   ${C2}has (Haswell)${CN}"
+   echo "   ${C2}bro (Broadwell)${CN}"
+   echo "   ${C2}sky (Skylake)${CN} (default)"
+   echo "   ${C2}cas (Cascade Lake)${CN}"
+   echo "   ${C2}rom (AMD Rome)${CN}"
+   echo " "
+   echo " NOTE Due to how FV3 is compiled by default, Sandy Bridge"
+   echo "      and Ivy Bridge are not supported by current GEOS"
+   echo " "
+   set MODEL = `echo $<`
+   set MODEL = `echo $MODEL | tr "[:upper:]" "[:lower:]"`
+   if ( .$MODEL == .) then
+      set MODEL = 'sky'
    endif
 
+   if( $MODEL != 'has' & \
+       $MODEL != 'bro' & \
+       $MODEL != 'sky' & \
+       $MODEL != 'cas' & \
+       $MODEL != 'rom' ) goto ASKPROC
+
    # Some processors have weird names at NAS
    # ---------------------------------------
 
@@ -472,8 +450,43 @@ else if ( $SITE == 'NAS' ) then
       set NCPUS_PER_NODE = 128
    endif
 
+else if( $SITE == 'AWS' | $SITE == 'Azure' ) then
+
+   # Because we do not know the name of the model or the number of CPUs
+   # per node, we ask the user to set these variables in the script
+
+   # AWS and Azure users must set the MODEL and NCPUS_PER_NODE
+   set MODEL = USER_MUST_SET
+   set NCPUS_PER_NODE = USER_MUST_SET
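+
+   # For example, on a hypothetical cluster of AWS c5n.18xlarge nodes
+   # (36 physical cores each) one might use:
+   #    set MODEL = c5n
+   #    set NCPUS_PER_NODE = 36
+   # These values are illustrative only; use the node type and core
+   # count of your own cluster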
+
+   # Above we require the user to set the MODEL and NCPUS_PER_NODE
+   # variables. Here we check that they have been set. If not, we
+   # print instructions for the user and exit
+   # --------------------------------------------------------
+
+   if ( $MODEL == USER_MUST_SET | $NCPUS_PER_NODE == USER_MUST_SET ) then
+      echo "ERROR: We have detected you are on $SITE. As we do not have"
+      echo "       official fixed node info yet, we ask you to edit $0"
+      echo "       and set the MODEL and NCPUS_PER_NODE variables."
+      echo "       Look for the section that says:"
+      echo " "
+      echo "       # AWS and Azure users must set the MODEL and NCPUS_PER_NODE"
+      exit 1
+   endif
+
 else
    set MODEL = 'UNKNOWN'
+   # As we do not know how many CPUs per node this machine has, we
+   # detect it from the total CPU count, which is queried differently
+   # on Linux and macOS
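+   # Note that both commands below report logical CPUs, so on machines
+   # with SMT/hyperthreading enabled the count includes hardware threads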
+   if ( $ARCH == 'Linux' ) then
+      set NCPUS_PER_NODE = `grep -c ^processor /proc/cpuinfo`
+   else if ( $ARCH == 'Darwin' ) then
+      set NCPUS_PER_NODE = `sysctl -n hw.ncpu`
+   else
+      echo "ERROR: Unknown architecture $ARCH"
+      exit 1
+   endif
 endif
 
 #######################################################################
@@ -1222,7 +1235,7 @@ if( $AGCM_IM == "c1536" ) then
      set SC_SPLIT = 1
      set AGCM_IM  = 1536
      set AGCM_JM  = `expr $AGCM_IM \* 6`
-     set       NX = 96 
+     set       NX = 96
      set       NY = `expr $NX \* 6`
      set HYDROSTATIC = $USE_HYDROSTATIC
      set HIST_IM  = `expr $AGCM_IM \* 4`
@@ -1749,30 +1762,30 @@ else if( $SITE == 'NCCS' ) then
               if ( "$OCNMODEL" == "MIT" ) then
                 setenv COUPLEDIR  /gpfsm/dnb32/estrobac/geos5/GRIDDIR                  # Coupled Ocean/Atmos Forcing
               endif
-else if( $SITE == 'AWS' ) then
-              setenv BATCH_CMD "sbatch"                                                # SLURM Batch command
-              setenv BATCH_GROUP DELETE                                                # SLURM Syntax for account name
-              setenv BATCH_TIME "SBATCH --time="                                       # SLURM Syntax for walltime
-              setenv BATCH_JOBNAME "SBATCH --job-name="                                # SLURM Syntax for job name
-              setenv BATCH_OUTPUTNAME "SBATCH --output="                               # SLURM Syntax for job output name
-              setenv BATCH_JOINOUTERR "DELETE"                                         # SLURM joins out and err by default
-              setenv     RUN_FT "06:00:00"                                             # Wallclock Time   for gcm_forecast.j
-              setenv     RUN_T  "12:00:00"                                             # Wallclock Time   for gcm_run.j
-              setenv    POST_T  "8:00:00"                                              # Wallclock Time   for gcm_post.j
-              setenv    PLOT_T  "12:00:00"                                             # Wallclock Time   for gcm_plot.j
-              setenv ARCHIVE_T  "1:00:00"                                              # Wallclock Time   for gcm_archive.j
-              setenv  RUN_Q     DELETE                                                 # batch queue name for gcm_run.j
-              setenv  RUN_P   "SBATCH --ntasks=${MODEL_NPES}"                          # PE Configuration for gcm_run.j
-              setenv  RUN_FP  "SBATCH --ntasks=${MODEL_NPES}"                          # PE Configuration for gcm_forecast.j
-              setenv    POST_Q  NULL                                                   # batch queue name for gcm_post.j
-              setenv    PLOT_Q  NULL                                                   # batch queue name for gcm_plot.j
-              setenv    MOVE_Q  NULL                                                   # batch queue name for gcm_moveplot.j
-              setenv ARCHIVE_Q  NULL                                                   # batch queue name for gcm_archive.j
-              setenv    POST_P  "SBATCH --ntasks=${POST_NPES}"                         # PE Configuration for gcm_post.j
-              setenv    PLOT_P  "SBATCH --nodes=4 --ntasks=4"                          # PE Configuration for gcm_plot.j
-              setenv ARCHIVE_P  "SBATCH --ntasks=1"                                    # PE Configuration for gcm_archive.j
-              setenv CONVERT_P  "SBATCH --ntasks=${CNV_NPES}"                          # PE Configuration for gcm_convert.j
-              setenv    MOVE_P  "SBATCH --ntasks=1"                                    # PE Configuration for gcm_moveplot.j
+else if( $SITE == 'AWS' | $SITE == 'Azure' ) then
+              setenv BATCH_CMD "sbatch"                                                    # SLURM Batch command
+              setenv BATCH_GROUP DELETE                                                    # SLURM Syntax for account name
+              setenv BATCH_TIME "SBATCH --time="                                           # SLURM Syntax for walltime
+              setenv BATCH_JOBNAME "SBATCH --job-name="                                    # SLURM Syntax for job name
+              setenv BATCH_OUTPUTNAME "SBATCH --output="                                   # SLURM Syntax for job output name
+              setenv BATCH_JOINOUTERR "DELETE"                                             # SLURM joins out and err by default
+              setenv RUN_FT "06:00:00"                                                     # Wallclock Time   for gcm_forecast.j
+              setenv RUN_T  "12:00:00"                                                     # Wallclock Time   for gcm_run.j
+              setenv POST_T  "8:00:00"                                                     # Wallclock Time   for gcm_post.j
+              setenv PLOT_T  "12:00:00"                                                    # Wallclock Time   for gcm_plot.j
+              setenv ARCHIVE_T  "1:00:00"                                                  # Wallclock Time   for gcm_archive.j
+              setenv RUN_Q     "SBATCH --constraint=${MODEL}"                              # node constraint  for gcm_run.j
+              setenv RUN_P   "SBATCH --nodes=${NODES} --ntasks-per-node=${NCPUS_PER_NODE}" # PE Configuration for gcm_run.j
+              setenv RUN_FP  "SBATCH --nodes=${NODES} --ntasks-per-node=${NCPUS_PER_NODE}" # PE Configuration for gcm_forecast.j
+              setenv POST_Q  NULL                                                          # batch queue name for gcm_post.j
+              setenv PLOT_Q  NULL                                                          # batch queue name for gcm_plot.j
+              setenv MOVE_Q  NULL                                                          # batch queue name for gcm_moveplot.j
+              setenv ARCHIVE_Q  NULL                                                       # batch queue name for gcm_archive.j
+              setenv POST_P  "SBATCH --ntasks=${POST_NPES}"                                # PE Configuration for gcm_post.j
+              setenv PLOT_P  "SBATCH --nodes=4 --ntasks=4"                                 # PE Configuration for gcm_plot.j
+              setenv ARCHIVE_P  "SBATCH --ntasks=1"                                        # PE Configuration for gcm_archive.j
+              setenv CONVERT_P  "SBATCH --ntasks=${CNV_NPES}"                              # PE Configuration for gcm_convert.j
+              setenv MOVE_P  "SBATCH --ntasks=1"                                           # PE Configuration for gcm_moveplot.j
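+
+              # Note: RUN_Q above uses SBATCH --constraint, which selects nodes
+              # advertising a matching feature tag in the SLURM configuration;
+              # the MODEL value set earlier must match a feature on your nodes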
 
               setenv BCSDIR     /ford1/share/gmao_SIteam/ModelData/bcs/${LSM_BCS}/${LSM_BCS}_${OCEAN_TAG}  # location of Boundary Conditions
               setenv REPLAY_ANA_EXPID    REPLAY_UNSUPPORTED                                                # Default Analysis Experiment for REPLAY
@@ -1785,11 +1798,6 @@ else if( $SITE == 'AWS' ) then
               setenv COUPLEDIR  /ford1/share/gmao_SIteam/ModelData/aogcm               # Coupled Ocean/Atmos Forcing
               setenv GWDRSDIR   /ford1/share/gmao_SIteam/ModelData/GWD_RIDGE           # Location of GWD_RIDGE files
 
-              # By default on AWS, just ignore IOSERVER for now until testing
-              set USE_IOSERVER = 0
-              set NUM_OSERVER_NODES = 0
-              set NUM_BACKEND_PES = 0
-              set NCPUS_PER_NODE = 0
 else
 # These are defaults for the desktop
               setenv BATCH_CMD "sbatch"                                                # SLURM Batch command
@@ -1834,7 +1842,6 @@ else
               set USE_IOSERVER = 0
               set NUM_OSERVER_NODES = 0
               set NUM_BACKEND_PES = 0
-              set NCPUS_PER_NODE = 0
 endif
 
 #######################################################################