From 0720aa1ac9c672e8b7a6ede4828e603e62778b86 Mon Sep 17 00:00:00 2001
From: mauriliogenovese
Date: Fri, 22 Mar 2024 13:56:24 +0100
Subject: [PATCH 01/11] support for gpu queue

---
 nipype/pipeline/engine/nodes.py               |  4 ++
 nipype/pipeline/plugins/multiproc.py          | 62 ++++++++++++++++---
 .../pipeline/plugins/tests/test_multiproc.py  | 15 +++++
 3 files changed, 74 insertions(+), 7 deletions(-)

diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py
index 3756d00ce8..5afea316c2 100644
--- a/nipype/pipeline/engine/nodes.py
+++ b/nipype/pipeline/engine/nodes.py
@@ -821,6 +821,10 @@ def update(self, **opts):
         """Update inputs"""
         self.inputs.update(**opts)
 
+    def is_gpu_node(self):
+        return ((hasattr(self.inputs, 'use_cuda') and self.inputs.use_cuda)
+                or (hasattr(self.inputs, 'use_gpu') and self.inputs.use_gpu))
+
 
 class JoinNode(Node):
     """Wraps interface objects that join inputs into a list.
diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py
index 401b01b388..8213c6c821 100644
--- a/nipype/pipeline/plugins/multiproc.py
+++ b/nipype/pipeline/plugins/multiproc.py
@@ -100,6 +100,7 @@ class MultiProcPlugin(DistributedPluginBase):
 
     - non_daemon: boolean flag to execute as non-daemon processes
     - n_procs: maximum number of threads to be executed in parallel
+    - n_gpu_procs: maximum number of GPU threads to be executed in parallel
     - memory_gb: maximum memory (in GB) that can be used at once.
     - raise_insufficient: raise error if the requested resources for
       a node over the maximum `n_procs` and/or `memory_gb`
@@ -130,10 +131,22 @@ def __init__(self, plugin_args=None):
         )
         self.raise_insufficient = self.plugin_args.get("raise_insufficient", True)
 
+        # GPU found on system
+        self.n_gpus_visible = MultiProcPlugin.gpu_count()
+        # proc per GPU set by user
+        self.n_gpu_procs = plugin_args.get('n_gpu_procs', self.n_gpus_visible)
+
+        # total no. of processes allowed on all gpus
+        if self.n_gpu_procs > self.n_gpus_visible:
+            logger.info(
+                'Total number of GPUs proc requested (%d) exceeds the available number of GPUs (%d) on the system. Using requested GPU slots at your own risk!' % (
+                self.n_gpu_procs, self.n_gpus_visible))
+
         # Instantiate different thread pools for non-daemon processes
         logger.debug(
-            "[MultiProc] Starting (n_procs=%d, mem_gb=%0.2f, cwd=%s)",
+            "[MultiProc] Starting (n_procs=%d, n_gpu_procs=%d, mem_gb=%0.2f, cwd=%s)",
             self.processors,
+            self.n_gpu_procs,
             self.memory_gb,
             self._cwd,
         )
@@ -184,9 +197,12 @@ def _prerun_check(self, graph):
         """Check if any node exceeds the available resources"""
         tasks_mem_gb = []
         tasks_num_th = []
+        tasks_gpu_th = []
         for node in graph.nodes():
             tasks_mem_gb.append(node.mem_gb)
             tasks_num_th.append(node.n_procs)
+            if node.is_gpu_node():
+                tasks_gpu_th.append(node.n_procs)
 
         if np.any(np.array(tasks_mem_gb) > self.memory_gb):
             logger.warning(
@@ -203,6 +219,12 @@ def _prerun_check(self, graph):
             )
             if self.raise_insufficient:
                 raise RuntimeError("Insufficient resources available for job")
+        if np.any(np.array(tasks_gpu_th) > self.n_gpu_procs):
+            logger.warning(
+                'Nodes demand more GPU than allowed (%d).',
+                self.n_gpu_procs)
+            if self.raise_insufficient:
+                raise RuntimeError('Insufficient GPU resources available for job')
 
     def _postrun_check(self):
         self.pool.shutdown()
@@ -213,11 +235,14 @@ def _check_resources(self, running_tasks):
         """
         free_memory_gb = self.memory_gb
         free_processors = self.processors
+        free_gpu_slots = self.n_gpu_procs
         for _, jobid in running_tasks:
             free_memory_gb -= min(self.procs[jobid].mem_gb, free_memory_gb)
             free_processors -= min(self.procs[jobid].n_procs, free_processors)
+            if self.procs[jobid].is_gpu_node():
+                free_gpu_slots -= min(self.procs[jobid].n_procs, free_gpu_slots)
 
-        return free_memory_gb, free_processors
+        return free_memory_gb, free_processors, free_gpu_slots
 
     def _send_procs_to_workers(self, updatehash=False, graph=None):
         """
@@ -232,7 +257,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
         )
 
         # Check available resources by summing all threads and memory used
-        free_memory_gb, free_processors = self._check_resources(self.pending_tasks)
+        free_memory_gb, free_processors, free_gpu_slots = self._check_resources(self.pending_tasks)
 
         stats = (
             len(self.pending_tasks),
@@ -241,6 +266,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
             self.memory_gb,
             free_processors,
             self.processors,
+            free_gpu_slots,
+            self.n_gpu_procs
         )
         if self._stats != stats:
             tasks_list_msg = ""
@@ -256,13 +283,15 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
                 tasks_list_msg = indent(tasks_list_msg, " " * 21)
             logger.info(
                 "[MultiProc] Running %d tasks, and %d jobs ready. Free "
Free " - "memory (GB): %0.2f/%0.2f, Free processors: %d/%d.%s", + "memory (GB): %0.2f/%0.2f, Free processors: %d/%d, Free GPU slot:%d/%d.%s", len(self.pending_tasks), len(jobids), free_memory_gb, self.memory_gb, free_processors, self.processors, + free_gpu_slots, + self.n_gpu_procs, tasks_list_msg, ) self._stats = stats @@ -304,28 +333,36 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # Check requirements of this job next_job_gb = min(self.procs[jobid].mem_gb, self.memory_gb) next_job_th = min(self.procs[jobid].n_procs, self.processors) + next_job_gpu_th = min(self.procs[jobid].n_procs, self.n_gpu_procs) + + is_gpu_node = self.procs[jobid].is_gpu_node() # If node does not fit, skip at this moment - if next_job_th > free_processors or next_job_gb > free_memory_gb: + if (next_job_th > free_processors or next_job_gb > free_memory_gb + or (is_gpu_node and next_job_gpu_th > free_gpu_slots)): logger.debug( - "Cannot allocate job %d (%0.2fGB, %d threads).", + "Cannot allocate job %d (%0.2fGB, %d threads, %d GPU slots).", jobid, next_job_gb, next_job_th, + next_job_gpu_th, ) continue free_memory_gb -= next_job_gb free_processors -= next_job_th + if is_gpu_node: + free_gpu_slots -= next_job_gpu_th logger.debug( "Allocating %s ID=%d (%0.2fGB, %d threads). Free: " - "%0.2fGB, %d threads.", + "%0.2fGB, %d threads, %d GPU slots.", self.procs[jobid].fullname, jobid, next_job_gb, next_job_th, free_memory_gb, free_processors, + free_gpu_slots, ) # change job status in appropriate queues @@ -352,6 +389,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): self._remove_node_dirs() free_memory_gb += next_job_gb free_processors += next_job_th + if is_gpu_node: + free_gpu_slots -= next_job_gpu_th # Display stats next loop self._stats = None @@ -379,3 +418,12 @@ def _sort_jobs(self, jobids, scheduler="tsort"): key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs), ) return jobids + + @staticmethod + def gpu_count(): + n_gpus = 1 + try: + import GPUtil + return len(GPUtil.getGPUs()) + except ImportError: + return n_gpus diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 938e1aab9e..b954cb9517 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -56,6 +56,7 @@ def test_run_multiproc(tmpdir): class InputSpecSingleNode(nib.TraitedSpec): input1 = nib.traits.Int(desc="a random int") input2 = nib.traits.Int(desc="a random int") + use_gpu = nib.traits.Bool(False, mandatory = False, desc="boolean for GPU nodes") class OutputSpecSingleNode(nib.TraitedSpec): @@ -116,6 +117,20 @@ def test_no_more_threads_than_specified(tmpdir): with pytest.raises(RuntimeError): pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads}) +def test_no_more_gpu_threads_than_specified(tmpdir): + tmpdir.chdir() + + pipe = pe.Workflow(name="pipe") + n1 = pe.Node(SingleNodeTestInterface(), name="n1", n_procs=2) + n1.inputs.use_gpu = True + n1.inputs.input1 = 4 + pipe.add_nodes([n1]) + + max_threads = 2 + max_gpu = 1 + with pytest.raises(RuntimeError): + pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads, 'n_gpu_procs': max_gpu}) + @pytest.mark.skipif( sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8" From 6c47dc009aeac77be993d17d5b1fb6ab1fa5d9d2 Mon Sep 17 00:00:00 2001 From: mauriliogenovese Date: Sun, 24 Mar 2024 10:52:44 +0100 Subject: [PATCH 02/11] gputil requirement gputils is required for gpu queue management --- 
 nipype/info.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/nipype/info.py b/nipype/info.py
index a550e4b389..f4fc365e7e 100644
--- a/nipype/info.py
+++ b/nipype/info.py
@@ -149,6 +149,7 @@ def get_nipype_gitversion():
     "filelock>=3.0.0",
     "etelemetry>=0.2.0",
     "looseversion!=1.2",
+    "gputil=1.4.0",
 ]
 
 TESTS_REQUIRES = [

From f1f5d764a3d7452a04c807388b454cf216744e8b Mon Sep 17 00:00:00 2001
From: mauriliogenovese
Date: Mon, 25 Mar 2024 07:38:29 +0100
Subject: [PATCH 03/11] Update info.py

---
 nipype/info.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nipype/info.py b/nipype/info.py
index f4fc365e7e..280c641ed6 100644
--- a/nipype/info.py
+++ b/nipype/info.py
@@ -149,7 +149,7 @@ def get_nipype_gitversion():
     "filelock>=3.0.0",
     "etelemetry>=0.2.0",
     "looseversion!=1.2",
-    "gputil=1.4.0",
+    "gputil==1.4.0",
 ]
 
 TESTS_REQUIRES = [

From a6424301d7bd3440fe5f465ba05fde0b38e37aed Mon Sep 17 00:00:00 2001
From: mauriliogenovese
Date: Mon, 25 Mar 2024 09:59:41 +0100
Subject: [PATCH 04/11] refactor and fix

---
 nipype/pipeline/engine/nodes.py               |  5 ++--
 nipype/pipeline/plugins/multiproc.py          | 25 +++++++++++--------
 .../pipeline/plugins/tests/test_multiproc.py  |  8 ++++--
 3 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py
index 5afea316c2..d9c066a795 100644
--- a/nipype/pipeline/engine/nodes.py
+++ b/nipype/pipeline/engine/nodes.py
@@ -822,8 +822,9 @@ def update(self, **opts):
         self.inputs.update(**opts)
 
     def is_gpu_node(self):
-        return ((hasattr(self.inputs, 'use_cuda') and self.inputs.use_cuda)
-                or (hasattr(self.inputs, 'use_gpu') and self.inputs.use_gpu))
+        return (hasattr(self.inputs, 'use_cuda') and self.inputs.use_cuda) or (
+            hasattr(self.inputs, 'use_gpu') and self.inputs.use_gpu
+        )
 
 
 class JoinNode(Node):
diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py
index 8213c6c821..9aec6ae072 100644
--- a/nipype/pipeline/plugins/multiproc.py
+++ b/nipype/pipeline/plugins/multiproc.py
@@ -134,13 +134,14 @@ def __init__(self, plugin_args=None):
         # GPU found on system
         self.n_gpus_visible = MultiProcPlugin.gpu_count()
         # proc per GPU set by user
-        self.n_gpu_procs = plugin_args.get('n_gpu_procs', self.n_gpus_visible)
+        self.n_gpu_procs = self.plugin_args.get('n_gpu_procs', self.n_gpus_visible)
 
         # total no. of processes allowed on all gpus
         if self.n_gpu_procs > self.n_gpus_visible:
             logger.info(
-                'Total number of GPUs proc requested (%d) exceeds the available number of GPUs (%d) on the system. Using requested GPU slots at your own risk!' % (
-                self.n_gpu_procs, self.n_gpus_visible))
+                'Total number of GPUs proc requested (%d) exceeds the available number of GPUs (%d) on the system. Using requested GPU slots at your own risk!'
+                % (self.n_gpu_procs, self.n_gpus_visible)
+            )
 
         # Instantiate different thread pools for non-daemon processes
         logger.debug(
@@ -220,9 +221,7 @@ def _prerun_check(self, graph):
             if self.raise_insufficient:
                 raise RuntimeError("Insufficient resources available for job")
         if np.any(np.array(tasks_gpu_th) > self.n_gpu_procs):
-            logger.warning(
-                'Nodes demand more GPU than allowed (%d).',
-                self.n_gpu_procs)
+            logger.warning('Nodes demand more GPU than allowed (%d).', self.n_gpu_procs)
             if self.raise_insufficient:
                 raise RuntimeError('Insufficient GPU resources available for job')
 
@@ -257,7 +256,9 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
         )
 
         # Check available resources by summing all threads and memory used
-        free_memory_gb, free_processors, free_gpu_slots = self._check_resources(self.pending_tasks)
+        free_memory_gb, free_processors, free_gpu_slots = self._check_resources(
+            self.pending_tasks
+        )
 
         stats = (
             len(self.pending_tasks),
@@ -267,7 +268,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
             free_processors,
             self.processors,
             free_gpu_slots,
-            self.n_gpu_procs
+            self.n_gpu_procs,
         )
         if self._stats != stats:
             tasks_list_msg = ""
@@ -338,8 +339,11 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
             is_gpu_node = self.procs[jobid].is_gpu_node()
 
             # If node does not fit, skip at this moment
-            if (next_job_th > free_processors or next_job_gb > free_memory_gb
-                or (is_gpu_node and next_job_gpu_th > free_gpu_slots)):
+            if (
+                next_job_th > free_processors
+                or next_job_gb > free_memory_gb
+                or (is_gpu_node and next_job_gpu_th > free_gpu_slots)
+            ):
                 logger.debug(
                     "Cannot allocate job %d (%0.2fGB, %d threads, %d GPU slots).",
                     jobid,
@@ -424,6 +428,7 @@ def gpu_count():
         n_gpus = 1
         try:
             import GPUtil
+
             return len(GPUtil.getGPUs())
         except ImportError:
             return n_gpus
diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py
index b954cb9517..484c0d07bc 100644
--- a/nipype/pipeline/plugins/tests/test_multiproc.py
+++ b/nipype/pipeline/plugins/tests/test_multiproc.py
@@ -56,7 +56,7 @@ def test_run_multiproc(tmpdir):
 class InputSpecSingleNode(nib.TraitedSpec):
     input1 = nib.traits.Int(desc="a random int")
     input2 = nib.traits.Int(desc="a random int")
-    use_gpu = nib.traits.Bool(False, mandatory = False, desc="boolean for GPU nodes")
+    use_gpu = nib.traits.Bool(False, mandatory=False, desc="boolean for GPU nodes")
 
 
 class OutputSpecSingleNode(nib.TraitedSpec):
@@ -117,6 +117,7 @@ def test_no_more_threads_than_specified(tmpdir):
     with pytest.raises(RuntimeError):
         pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads})
 
+
 def test_no_more_gpu_threads_than_specified(tmpdir):
     tmpdir.chdir()
 
@@ -129,7 +130,10 @@ def test_no_more_gpu_threads_than_specified(tmpdir):
     max_threads = 2
     max_gpu = 1
     with pytest.raises(RuntimeError):
-        pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads, 'n_gpu_procs': max_gpu})
+        pipe.run(
+            plugin="MultiProc",
+            plugin_args={"n_procs": max_threads, 'n_gpu_procs': max_gpu},
+        )
 
 
 @pytest.mark.skipif(

From 684b9b0e15618537f78248bf3c8953ad3f4f6eeb Mon Sep 17 00:00:00 2001
From: mauriliogenovese <125388969+mauriliogenovese@users.noreply.github.com>
Date: Sat, 18 Jan 2025 16:37:31 +0100
Subject: [PATCH 05/11] removed hard pin

---
 nipype/info.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nipype/info.py b/nipype/info.py
index 280c641ed6..ad45791a50 100644
--- a/nipype/info.py
+++ b/nipype/info.py
@@ -149,7 +149,7 @@ def get_nipype_gitversion():
"filelock>=3.0.0", "etelemetry>=0.2.0", "looseversion!=1.2", - "gputil==1.4.0", + "gputil>=1.4.0", ] TESTS_REQUIRES = [ From 8f74c5dd362e73e28282900297492c9dd7da7ed8 Mon Sep 17 00:00:00 2001 From: mauriliogenovese <125388969+mauriliogenovese@users.noreply.github.com> Date: Sat, 18 Jan 2025 16:42:00 +0100 Subject: [PATCH 06/11] gpu_count refactor --- nipype/pipeline/plugins/multiproc.py | 12 ++---------- nipype/pipeline/plugins/tools.py | 9 +++++++++ 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 9aec6ae072..4d7eaa6c6b 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -21,6 +21,7 @@ from ...utils.profiler import get_system_total_memory_gb from ..engine import MapNode from .base import DistributedPluginBase +from .tools import gpu_count try: from textwrap import indent @@ -132,7 +133,7 @@ def __init__(self, plugin_args=None): self.raise_insufficient = self.plugin_args.get("raise_insufficient", True) # GPU found on system - self.n_gpus_visible = MultiProcPlugin.gpu_count() + self.n_gpus_visible = gpu_count() # proc per GPU set by user self.n_gpu_procs = self.plugin_args.get('n_gpu_procs', self.n_gpus_visible) @@ -423,12 +424,3 @@ def _sort_jobs(self, jobids, scheduler="tsort"): ) return jobids - @staticmethod - def gpu_count(): - n_gpus = 1 - try: - import GPUtil - - return len(GPUtil.getGPUs()) - except ImportError: - return n_gpus diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index 8c28f36246..c767be398e 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -178,3 +178,12 @@ def create_pyscript(node, updatehash=False, store_exception=True): with open(pyscript, "w") as fp: fp.writelines(cmdstr) return pyscript + +def gpu_count(): + n_gpus = 1 + try: + import GPUtil + except ImportError: + return 1 + else: + return len(GPUtil.getGPUs()) From a307845390115b1b65659159c662334b39575ff5 Mon Sep 17 00:00:00 2001 From: mauriliogenovese <125388969+mauriliogenovese@users.noreply.github.com> Date: Sat, 18 Jan 2025 16:45:24 +0100 Subject: [PATCH 07/11] more readable --- nipype/pipeline/engine/nodes.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index d9c066a795..f9036529ac 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -822,9 +822,8 @@ def update(self, **opts): self.inputs.update(**opts) def is_gpu_node(self): - return (hasattr(self.inputs, 'use_cuda') and self.inputs.use_cuda) or ( - hasattr(self.inputs, 'use_gpu') and self.inputs.use_gpu - ) + return bool(getattr(self.inputs, 'use_cuda', False)) or bool( + getattr(self.inputs, 'use_gpu', False)) class JoinNode(Node): From 27448bcfb2ea34029f561dff5efd275f31609096 Mon Sep 17 00:00:00 2001 From: mauriliogenovese <125388969+mauriliogenovese@users.noreply.github.com> Date: Sat, 18 Jan 2025 16:46:21 +0100 Subject: [PATCH 08/11] logger argument --- nipype/pipeline/plugins/multiproc.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 4d7eaa6c6b..e1aa07d13b 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -140,9 +140,8 @@ def __init__(self, plugin_args=None): # total no. 
of processes allowed on all gpus if self.n_gpu_procs > self.n_gpus_visible: logger.info( - 'Total number of GPUs proc requested (%d) exceeds the available number of GPUs (%d) on the system. Using requested GPU slots at your own risk!' - % (self.n_gpu_procs, self.n_gpus_visible) - ) + 'Total number of GPUs proc requested (%d) exceeds the available number of GPUs (%d) on the system. Using requested GPU slots at your own risk!', + self.n_gpu_procs, self.n_gpus_visible) # Instantiate different thread pools for non-daemon processes logger.debug( From 2c2c066d846f12738c3ddd57af0e5ca3dc97df31 Mon Sep 17 00:00:00 2001 From: mauriliogenovese <125388969+mauriliogenovese@users.noreply.github.com> Date: Sat, 18 Jan 2025 18:52:56 +0100 Subject: [PATCH 09/11] code refactory --- nipype/pipeline/engine/nodes.py | 3 ++- nipype/pipeline/plugins/multiproc.py | 9 +++++---- nipype/pipeline/plugins/tools.py | 3 ++- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index f9036529ac..2d524c3efe 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -823,7 +823,8 @@ def update(self, **opts): def is_gpu_node(self): return bool(getattr(self.inputs, 'use_cuda', False)) or bool( - getattr(self.inputs, 'use_gpu', False)) + getattr(self.inputs, 'use_gpu', False) + ) class JoinNode(Node): diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index e1aa07d13b..054d0150e6 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -141,7 +141,9 @@ def __init__(self, plugin_args=None): if self.n_gpu_procs > self.n_gpus_visible: logger.info( 'Total number of GPUs proc requested (%d) exceeds the available number of GPUs (%d) on the system. 
-                self.n_gpu_procs, self.n_gpus_visible)
+                self.n_gpu_procs,
+                self.n_gpus_visible,
+            )
 
         # Instantiate different thread pools for non-daemon processes
         logger.debug(
@@ -394,7 +396,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
                 free_memory_gb += next_job_gb
                 free_processors += next_job_th
                 if is_gpu_node:
-                    free_gpu_slots -= next_job_gpu_th
+                    free_gpu_slots += next_job_gpu_th
 
         # Display stats next loop
         self._stats = None
@@ -421,5 +423,4 @@ def _sort_jobs(self, jobids, scheduler="tsort"):
             jobids,
             key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs),
         )
-        return jobids
-
+        return jobids
\ No newline at end of file
diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py
index c767be398e..eb50d4b40c 100644
--- a/nipype/pipeline/plugins/tools.py
+++ b/nipype/pipeline/plugins/tools.py
@@ -179,6 +179,7 @@ def create_pyscript(node, updatehash=False, store_exception=True):
         fp.writelines(cmdstr)
     return pyscript
 
+
 def gpu_count():
     n_gpus = 1
     try:
@@ -186,4 +187,4 @@ def gpu_count():
     except ImportError:
         return 1
     else:
-        return len(GPUtil.getGPUs())
+        return len(GPUtil.getGPUs())
\ No newline at end of file

From 66d628022155fbb2dcbacc798abd5e0242727804 Mon Sep 17 00:00:00 2001
From: mauriliogenovese <125388969+mauriliogenovese@users.noreply.github.com>
Date: Sat, 18 Jan 2025 18:56:10 +0100
Subject: [PATCH 10/11] newlines for style check

---
 nipype/pipeline/plugins/multiproc.py | 2 +-
 nipype/pipeline/plugins/tools.py     | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py
index 054d0150e6..ce191b0f7c 100644
--- a/nipype/pipeline/plugins/multiproc.py
+++ b/nipype/pipeline/plugins/multiproc.py
@@ -423,4 +423,4 @@ def _sort_jobs(self, jobids, scheduler="tsort"):
             jobids,
             key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs),
         )
-        return jobids
\ No newline at end of file
+        return jobids
diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py
index 43e50c276b..dabd75116c 100644
--- a/nipype/pipeline/plugins/tools.py
+++ b/nipype/pipeline/plugins/tools.py
@@ -185,4 +185,5 @@ def gpu_count():
     except ImportError:
         return 1
     else:
-        return len(GPUtil.getGPUs())
\ No newline at end of file
+        return len(GPUtil.getGPUs())
+    
\ No newline at end of file

From 610f1cbe33490ef5e6f4d2e43037655594a64f18 Mon Sep 17 00:00:00 2001
From: mauriliogenovese <125388969+mauriliogenovese@users.noreply.github.com>
Date: Sun, 19 Jan 2025 10:25:09 +0100
Subject: [PATCH 11/11] newline for code check

---
 nipype/pipeline/plugins/tools.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py
index dabd75116c..3d879a1971 100644
--- a/nipype/pipeline/plugins/tools.py
+++ b/nipype/pipeline/plugins/tools.py
@@ -186,4 +186,3 @@ def gpu_count():
         return 1
     else:
         return len(GPUtil.getGPUs())
-    
\ No newline at end of file
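
Usage sketch (not taken from the series itself): with these patches applied, MultiProc accepts an `n_gpu_procs` plugin argument, and any node whose interface exposes a truthy `use_cuda` or `use_gpu` input counts its `n_procs` against the GPU slot pool while it runs. A minimal, hypothetical example follows; `GPUInterface` is a stand-in for any interface with a `use_gpu` input, and the slot counts are illustrative:

    import nipype.pipeline.engine as pe

    wf = pe.Workflow(name="gpu_wf")
    # n_procs is the node's thread demand; for GPU nodes it is also the
    # number of GPU slots the node occupies while running.
    node = pe.Node(GPUInterface(), name="gpu_task", n_procs=1)
    node.inputs.use_gpu = True  # makes Node.is_gpu_node() return True
    wf.add_nodes([node])

    # Allow up to 8 concurrent CPU threads but at most 2 concurrent GPU jobs.
    # If n_gpu_procs is omitted, it defaults to the GPU count reported by
    # GPUtil, or 1 when GPUtil is not installed (see gpu_count() above).
    wf.run(
        plugin="MultiProc",
        plugin_args={"n_procs": 8, "n_gpu_procs": 2},
    )

As in the CPU/memory checks, a workflow whose single node demands more GPU slots than `n_gpu_procs` raises `RuntimeError` during the pre-run check unless `raise_insufficient` is disabled.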