diff --git a/docs/installation.md b/docs/installation.md
index ae6e8fdf..5eec393a 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -120,8 +120,8 @@ For the version 5 of openmpi the backend changed to `pmix`, this requires the ad
 ```
 conda install -c conda-forge flux-core flux-sched flux-pmix openmpi>=5 executorlib
 ```
-In addition, the `flux_executor_pmi_mode="pmix"` parameter has to be set for the `executorlib.Executor` to switch to
-`pmix` as backend.
+In addition, the `flux_executor_pmi_mode="pmix"` parameter has to be set for the `FluxJobExecutor` or the
+`FluxClusterExecutor` to switch to `pmix` as backend.
 
 ### Test Flux Framework
 To validate the installation of flux and confirm the GPUs are correctly recognized, you can start a flux session on the
diff --git a/executorlib/executor/flux.py b/executorlib/executor/flux.py
index aaff34b7..5fcfc030 100644
--- a/executorlib/executor/flux.py
+++ b/executorlib/executor/flux.py
@@ -19,11 +19,11 @@
 
 class FluxJobExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
+    The executorlib.FluxJobExecutor leverages either the message passing interface (MPI), the SLURM workload manager or
     preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    the mpi4py.futures.MPIPoolExecutor the executorlib.FluxJobExecutor can be executed in a serial python process and
+    does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.FluxJobExecutor directly in an interactive Jupyter notebook.
 
     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -65,7 +65,7 @@ class FluxJobExecutor(BaseExecutor):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.flux import FluxJobExecutor
+        >>> from executorlib import FluxJobExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -102,12 +102,11 @@ def __init__(
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.FluxJobExecutor leverages either the message passing interface (MPI), the SLURM workload manager
+        or preferable the flux framework for distributing python functions within a given resource allocation. In
+        contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.FluxJobExecutor can be executed in a serial
+        python process and does not require the python script to be executed with MPI. It is even possible to execute
+        the executorlib.FluxJobExecutor directly in an interactive Jupyter notebook.
 
         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
@@ -204,11 +203,11 @@ def __init__(
 
 class FluxClusterExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
-    preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    The executorlib.FluxClusterExecutor leverages either the message passing interface (MPI), the SLURM workload manager
+    or preferable the flux framework for distributing python functions within a given resource allocation. In contrast
+    to the mpi4py.futures.MPIPoolExecutor the executorlib.FluxClusterExecutor can be executed in a serial python process
+    and does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.FluxClusterExecutor directly in an interactive Jupyter notebook.
 
     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -246,7 +245,7 @@ class FluxClusterExecutor(BaseExecutor):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.flux import FluxClusterExecutor
+        >>> from executorlib import FluxClusterExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -280,12 +279,11 @@ def __init__(
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.FluxClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
+        manager or preferable the flux framework for distributing python functions within a given resource allocation.
+        In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.FluxClusterExecutor can be executed in a
+        serial python process and does not require the python script to be executed with MPI. It is even possible to
+        execute the executorlib.FluxClusterExecutor directly in an interactive Jupyter notebook.
 
         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
diff --git a/executorlib/executor/single.py b/executorlib/executor/single.py
index 3081f412..459551f7 100644
--- a/executorlib/executor/single.py
+++ b/executorlib/executor/single.py
@@ -19,11 +19,11 @@
 
 class SingleNodeExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
-    preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    The executorlib.SingleNodeExecutor leverages either the message passing interface (MPI), the SLURM workload manager
+    or preferable the flux framework for distributing python functions within a given resource allocation. In contrast
+    to the mpi4py.futures.MPIPoolExecutor the executorlib.SingleNodeExecutor can be executed in a serial python process
+    and does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.SingleNodeExecutor directly in an interactive Jupyter notebook.
 
     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -60,7 +60,7 @@ class SingleNodeExecutor(BaseExecutor):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.single import SingleNodeExecutor
+        >>> from executorlib import SingleNodeExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -93,12 +93,11 @@ def __init__(
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.SingleNodeExecutor leverages either the message passing interface (MPI), the SLURM workload
+        manager or preferable the flux framework for distributing python functions within a given resource allocation.
+        In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SingleNodeExecutor can be executed in a serial
+        python process and does not require the python script to be executed with MPI. It is even possible to execute
+        the executorlib.SingleNodeExecutor directly in an interactive Jupyter notebook.
 
         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
diff --git a/executorlib/executor/slurm.py b/executorlib/executor/slurm.py
index b0cc5a46..fa75b10b 100644
--- a/executorlib/executor/slurm.py
+++ b/executorlib/executor/slurm.py
@@ -20,11 +20,11 @@
 
 class SlurmClusterExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
-    preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    The executorlib.SlurmClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
+    manager or preferable the flux framework for distributing python functions within a given resource allocation. In
+    contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmClusterExecutor can be executed in a serial
+    python process and does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.SlurmClusterExecutor directly in an interactive Jupyter notebook.
 
     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -62,7 +62,7 @@ class SlurmClusterExecutor(BaseExecutor):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.slurm import SlurmClusterExecutor
+        >>> from executorlib import SlurmClusterExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -96,12 +96,11 @@ def __init__(
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.SlurmClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
+        manager or preferable the flux framework for distributing python functions within a given resource allocation.
+        In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmClusterExecutor can be executed in a
+        serial python process and does not require the python script to be executed with MPI. It is even possible to
+        execute the executorlib.SlurmClusterExecutor directly in an interactive Jupyter notebook.
 
         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
@@ -196,11 +195,11 @@ def __init__(
 
 class SlurmJobExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
+    The executorlib.SlurmJobExecutor leverages either the message passing interface (MPI), the SLURM workload manager or
     preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmJobExecutor can be executed in a serial python process and
+    does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.SlurmJobExecutor directly in an interactive Jupyter notebook.
 
     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -241,7 +240,7 @@ class SlurmJobExecutor(BaseExecutor):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.slurm import SlurmJobExecutor
+        >>> from executorlib import SlurmJobExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -274,12 +273,11 @@ def __init__(
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.SlurmJobExecutor leverages either the message passing interface (MPI), the SLURM workload
+        manager or preferable the flux framework for distributing python functions within a given resource allocation.
+        In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmJobExecutor can be executed in a serial
+        python process and does not require the python script to be executed with MPI. It is even possible to execute
+        the executorlib.SlurmJobExecutor directly in an interactive Jupyter notebook.
 
         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
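
As a usage note on the renamed classes and the `flux_executor_pmi_mode="pmix"` setting from `docs/installation.md`: the sketch below is not part of the patch, assumes the conda packages listed above (flux-core, flux-sched, flux-pmix, openmpi>=5, executorlib) are installed, and assumes the executor follows the standard `concurrent.futures` submit pattern shown in the docstring examples; the `calc` function is purely illustrative.

```python
# Hedged usage sketch (not part of the diff above): run inside a flux
# allocation with the packages from docs/installation.md available.
from executorlib import FluxJobExecutor


def calc(i, j):
    # Illustrative payload function; any picklable Python callable works here.
    return i + j


# flux_executor_pmi_mode="pmix" switches the flux backend to pmix, which the
# installation notes require for OpenMPI 5; max_workers keeps the
# concurrent.futures meaning of the number of parallel workers.
with FluxJobExecutor(max_workers=1, flux_executor_pmi_mode="pmix") as exe:
    future = exe.submit(calc, 1, j=2)
    print(future.result())  # prints 3
```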