[DataPipe] Skip FullSync operation when world_size == 1 #1065

Closed · wants to merge 7 commits
test/test_distributed.py (5 additions, 2 deletions)

@@ -42,6 +42,8 @@
 if dist.is_nccl_available() and torch.cuda.device_count() > 0:
     _backends.append("nccl")
 
+
+world_size_parametrize = parametrize("world_size", [1, DEFAULT_WORLD_SIZE])
 backend_parametrize = parametrize("backend", _backends)

@@ -149,9 +151,10 @@ def _test_fullsync(rank, world_size, backend, q):

         _finalize_distributed_queue(rank, q)
 
+    @world_size_parametrize
     @backend_parametrize
-    def test_fullsync(self, backend) -> None:
-        world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count()
+    def test_fullsync(self, world_size, backend) -> None:
+        world_size = world_size if backend != "nccl" else torch.cuda.device_count()
         launch_distributed_training(backend, world_size, fn=DistributedTest._test_fullsync)

     @staticmethod
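
The stacked `@world_size_parametrize` and `@backend_parametrize` decorators now run `test_fullsync` once per `(world_size, backend)` pair, i.e. world sizes 1 and `DEFAULT_WORLD_SIZE` for each backend (for `nccl`, the world size is still overridden by `torch.cuda.device_count()`). For context, here is a minimal sketch of what a launcher like `launch_distributed_training` could look like; the `_worker` helper and the rendezvous address are illustrative assumptions, and the real helper in `test/test_distributed.py` may differ:

```python
# Illustrative sketch only: the real launch_distributed_training in
# test/test_distributed.py may differ. It spawns `world_size` processes,
# each of which joins the process group before running the test body `fn`.
import torch.distributed as dist
import torch.multiprocessing as mp


def _worker(rank, world_size, backend, fn, q):
    # Assumed rendezvous endpoint; any free local port would do.
    dist.init_process_group(
        backend,
        init_method="tcp://127.0.0.1:29500",
        rank=rank,
        world_size=world_size,
    )
    fn(rank, world_size, backend, q)  # e.g. DistributedTest._test_fullsync


def launch_distributed_training(backend, world_size, fn):
    q = mp.get_context("spawn").Queue()  # workers report results back here
    mp.spawn(_worker, args=(world_size, backend, fn, q), nprocs=world_size, join=True)
```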
torchdata/datapipes/iter/util/distributed.py (11 additions, 8 deletions)

@@ -171,13 +171,17 @@ def _callback_fn(self, exp: Expected) -> None:

     def __iter__(self) -> Iterator[T_co]:
         assert self._executor is None
 
         if not (dist.is_available() and dist.is_initialized()):
-            raise RuntimeError("Torch Distributed is required to be initialized")
+            raise RuntimeError("Torch Distributed is required to be initialized to use `FullSync`.")
 
         if self._process_group is None:
             self._process_group = dist.new_group(backend="gloo")
         self._world_size = dist.get_world_size()
 
+        if self._world_size == 1:  # The below functionalities are not needed if `_world_size == 1`
+            yield from self.datapipe
+            return
+
         self._executor = _PrefetchExecutor(iter(self.datapipe), 1, self._callback_fn, self.timeout)
         while True:
             with self._cv:
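
The early return is the heart of the change: on a single-rank process group, `fullsync` degenerates to a plain pass-through and never starts the prefetch executor or issues collective ops. A minimal single-process sketch, assuming the `fullsync` functional registered by `FullSyncIterDataPipe`; the rendezvous address is illustrative:

```python
# Single-process sketch of the new fast path: with world_size == 1, fullsync
# simply yields from the wrapped datapipe. The endpoint below is illustrative.
import torch.distributed as dist
from torchdata.datapipes.iter import IterableWrapper

dist.init_process_group(
    "gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1
)

dp = IterableWrapper(range(10)).fullsync()
assert list(dp) == list(range(10))  # pass-through: no executor, no barriers

dist.destroy_process_group()
```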
@@ -231,13 +235,12 @@ def __setstate__(self, state):
         self._sync_counter = torch.tensor([0], dtype=torch.int32)
         self._done_callback = False
 
     @final
     def pause(self):
-        raise RuntimeError("`pause` is not supported for FullSync at the moment.")
-        # if self._executor is not None:
-        #     self._executor.shutdown()
-        #     self._executor = None
+        if self._world_size > 1 and self._executor is not None:
+            raise RuntimeError("`pause` is not supported for FullSync at the moment.")
 
     @final
     def resume(self):
-        raise RuntimeError("`resume` is not supported for FullSync at the moment.")
-        # self._executor = _PrefetchExecutor(iter(self.datapipe), 1, self._callback_fn, self.timeout)
+        if self._world_size > 1 and self._executor is not None:
+            raise RuntimeError("`resume` is not supported for FullSync at the moment.")