@@ -151,12 +151,7 @@ def create_executor(
     backend: str = "local",
     max_cores: int = 1,
     cache_directory: Optional[str] = None,
-    cores_per_worker: int = 1,
-    threads_per_core: int = 1,
-    gpus_per_worker: int = 0,
-    cwd: Optional[str] = None,
-    openmpi_oversubscribe: bool = False,
-    slurm_cmd_args: list[str] = [],
+    resource_dict: Optional[dict] = None,
     flux_executor=None,
     flux_executor_pmi_mode: Optional[str] = None,
     flux_executor_nesting: bool = False,
@@ -179,12 +174,14 @@ def create_executor(
         backend (str): Switch between the different backends "flux", "local" or "slurm". The default is "local".
         max_cores (int): defines the number cores which can be used in parallel
         cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
-        cores_per_worker (int): number of MPI cores to be used for each function call
-        threads_per_core (int): number of OpenMP threads to be used for each function call
-        gpus_per_worker (int): number of GPUs per worker - defaults to 0
-        cwd (str/None): current working directory where the parallel python task is executed
-        openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
-        slurm_cmd_args (list): Additional command line arguments for the srun call (SLURM only)
+        resource_dict (dict): A dictionary of resources required by the task, with the following keys:
+            - cores_per_worker (int): number of MPI cores to be used for each function call
+            - threads_per_core (int): number of OpenMP threads to be used for each function call
+            - gpus_per_worker (int): number of GPUs per worker - defaults to 0
+            - cwd (str/None): current working directory where the parallel python task is executed
+            - openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and
+              SLURM only) - default False
+            - slurm_cmd_args (list): Additional command line arguments for the srun call (SLURM only)
         flux_executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
         flux_executor_pmi_mode (str): PMI interface to use (OpenMPI v5 requires pmix) default is None (Flux only)
         flux_executor_nesting (bool): Provide hierarchically nested Flux job scheduler inside the submitted function.
@@ -205,70 +202,69 @@ def create_executor(
     if flux_executor is not None and backend != "flux":
         backend = "flux"
     check_pmi(backend=backend, pmi=flux_executor_pmi_mode)
-    executor_kwargs = {
-        "cores": cores_per_worker,
-        "hostname_localhost": hostname_localhost,
-        "cwd": cwd,
-        "cache_directory": cache_directory,
-    }
+    cores_per_worker = resource_dict["cores"]
+    resource_dict["cache_directory"] = cache_directory
+    resource_dict["hostname_localhost"] = hostname_localhost
     if backend == "flux":
-        check_oversubscribe(oversubscribe=openmpi_oversubscribe)
-        check_command_line_argument_lst(command_line_argument_lst=slurm_cmd_args)
-        executor_kwargs["threads_per_core"] = threads_per_core
-        executor_kwargs["gpus_per_core"] = int(gpus_per_worker / cores_per_worker)
-        executor_kwargs["flux_executor"] = flux_executor
-        executor_kwargs["flux_executor_pmi_mode"] = flux_executor_pmi_mode
-        executor_kwargs["flux_executor_nesting"] = flux_executor_nesting
+        check_oversubscribe(oversubscribe=resource_dict["openmpi_oversubscribe"])
+        check_command_line_argument_lst(
+            command_line_argument_lst=resource_dict["slurm_cmd_args"]
+        )
+        del resource_dict["openmpi_oversubscribe"]
+        del resource_dict["slurm_cmd_args"]
+        resource_dict["flux_executor"] = flux_executor
+        resource_dict["flux_executor_pmi_mode"] = flux_executor_pmi_mode
+        resource_dict["flux_executor_nesting"] = flux_executor_nesting
         if block_allocation:
-            executor_kwargs["init_function"] = init_function
+            resource_dict["init_function"] = init_function
             return InteractiveExecutor(
                 max_workers=int(max_cores / cores_per_worker),
-                executor_kwargs=executor_kwargs,
+                executor_kwargs=resource_dict,
                 spawner=FluxPythonSpawner,
             )
         else:
             return InteractiveStepExecutor(
                 max_cores=max_cores,
-                executor_kwargs=executor_kwargs,
+                executor_kwargs=resource_dict,
                 spawner=FluxPythonSpawner,
             )
     elif backend == "slurm":
         check_executor(executor=flux_executor)
         check_nested_flux_executor(nested_flux_executor=flux_executor_nesting)
-        executor_kwargs["threads_per_core"] = threads_per_core
-        executor_kwargs["gpus_per_core"] = int(gpus_per_worker / cores_per_worker)
-        executor_kwargs["slurm_cmd_args"] = slurm_cmd_args
-        executor_kwargs["openmpi_oversubscribe"] = openmpi_oversubscribe
         if block_allocation:
-            executor_kwargs["init_function"] = init_function
+            resource_dict["init_function"] = init_function
             return InteractiveExecutor(
                 max_workers=int(max_cores / cores_per_worker),
-                executor_kwargs=executor_kwargs,
+                executor_kwargs=resource_dict,
                 spawner=SrunSpawner,
             )
         else:
             return InteractiveStepExecutor(
                 max_cores=max_cores,
-                executor_kwargs=executor_kwargs,
+                executor_kwargs=resource_dict,
                 spawner=SrunSpawner,
             )
     else:  # backend="local"
-        check_threads_per_core(threads_per_core=threads_per_core)
-        check_gpus_per_worker(gpus_per_worker=gpus_per_worker)
-        check_command_line_argument_lst(command_line_argument_lst=slurm_cmd_args)
         check_executor(executor=flux_executor)
         check_nested_flux_executor(nested_flux_executor=flux_executor_nesting)
-        executor_kwargs["openmpi_oversubscribe"] = openmpi_oversubscribe
+        check_threads_per_core(threads_per_core=resource_dict["threads_per_core"])
+        check_gpus_per_worker(gpus_per_worker=resource_dict["gpus_per_core"])
+        check_command_line_argument_lst(
+            command_line_argument_lst=resource_dict["slurm_cmd_args"]
+        )
+        del resource_dict["threads_per_core"]
+        del resource_dict["gpus_per_core"]
+        del resource_dict["slurm_cmd_args"]
        if block_allocation:
-            executor_kwargs["init_function"] = init_function
+            resource_dict["init_function"] = init_function
             return InteractiveExecutor(
                 max_workers=int(max_cores / cores_per_worker),
-                executor_kwargs=executor_kwargs,
+                executor_kwargs=resource_dict,
                 spawner=MpiExecSpawner,
             )
         else:
             return InteractiveStepExecutor(
                 max_cores=max_cores,
-                executor_kwargs=executor_kwargs,
+                executor_kwargs=resource_dict,
                 spawner=MpiExecSpawner,
             )
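Usage sketch (not part of the commit): a minimal example of how a call might look with the new `resource_dict` argument. The key names follow what the function body above actually reads ("cores", "threads_per_core", "gpus_per_core", "cwd", "openmpi_oversubscribe", "slurm_cmd_args"); the import path, the extra keyword arguments, and all values are assumptions for illustration only.

```python
# Hypothetical illustration of the refactored interface; module path assumed.
from executorlib.interactive import create_executor  # import path is an assumption

exe = create_executor(
    backend="local",
    max_cores=2,
    resource_dict={
        "cores": 2,                      # read as cores_per_worker by the function body
        "threads_per_core": 1,           # validated and dropped for the local backend
        "gpus_per_core": 0,              # validated and dropped for the local backend
        "cwd": None,                     # forwarded to the spawner unchanged
        "openmpi_oversubscribe": False,  # kept and forwarded to MpiExecSpawner locally
        "slurm_cmd_args": [],            # validated and dropped for non-SLURM backends
    },
    block_allocation=True,               # parameter inferred from the function body
)
```

Note that the dictionary is modified in place: `cache_directory` and `hostname_localhost` are added, backend-specific keys are deleted, and the remainder is forwarded as `executor_kwargs`.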