[fix] multi-node backward slowdown (#6134)
* remove redundant memcpy during backward

* get back record_stream
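
The second bullet refers to PyTorch's Tensor.record_stream. The restored call itself is not visible in the diff below, so what follows is only a minimal sketch of the general pattern, assuming stock PyTorch; comm_stream and grad are illustrative names, not the commit's code:

    import torch

    if torch.cuda.is_available():
        comm_stream = torch.cuda.Stream()
        grad = torch.randn(1024, device="cuda")

        # Stand-in for an async collective that reads `grad` on a side stream.
        with torch.cuda.stream(comm_stream):
            grad.mul_(2.0)

        # Tell the caching allocator that `grad` is in use on comm_stream, so its
        # memory is not recycled for another tensor until that stream's queued
        # work finishes. This matters once .clone() copies are removed and
        # several tensors alias the same storage.
        grad.record_stream(comm_stream)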
BurkeHulk authored Nov 14, 2024
1 parent c2fe313 commit cc40fe0
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions colossalai/zero/low_level/bookkeeping/bucket_store.py
@@ -78,13 +78,13 @@ def build_grad_in_bucket(self):
         }
         """
         for param, padding_size in zip(self._param_list, self._padding_size):
-            grad = param.grad.clone().detach().flatten()
+            grad = param.grad.detach().flatten()
             if padding_size > 0:
                 with torch.no_grad():
                     grad = torch.nn.functional.pad(grad.view(-1), [0, padding_size])
             grad_list = grad.split(grad.numel() // self._world_size)
             for rank in range(self._world_size):
-                grad_current_rank = grad_list[rank].clone().detach()
+                grad_current_rank = grad_list[rank].detach()
                 self.grad_to_param_mapping[id(grad_current_rank)] = id(param)
                 self._grad_in_bucket[rank].append(grad_current_rank)
             param.grad = None
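
Dropping .clone() here is the redundant memcpy named in the commit title: .detach() returns a tensor that shares storage with param.grad, while .clone() allocates fresh memory and copies into it. A minimal sketch of the difference, using plain PyTorch with illustrative names:

    import torch

    param_grad = torch.randn(8)

    shared = param_grad.detach().flatten()          # view: same storage, no copy
    copied = param_grad.clone().detach().flatten()  # new allocation plus a memcpy

    print(shared.data_ptr() == param_grad.data_ptr())  # True
    print(copied.data_ptr() == param_grad.data_ptr())  # False

The same reasoning applies to grad_list[rank].detach(): Tensor.split returns views, and dropping the per-rank .clone() keeps them as views instead of copying each slice.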
@@ -110,7 +110,7 @@ def get_flatten_grad(self) -> Tensor:
 
         flat_grad = []
         for grad_list in self._grad_in_bucket.values():
-            flat_grad.append(_flatten_dense_tensors(grad_list))
+            flat_grad.extend(grad_list)
         flat_grad = _flatten_dense_tensors(flat_grad)
         return flat_grad
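
This change halves the copy traffic in get_flatten_grad: the old code flattened each rank's list (copying every gradient byte once) and then flattened the per-rank results again (copying everything a second time), while the new code gathers all slices first and flattens once. A hedged sketch of the before/after behavior; grad_in_bucket is a toy stand-in for self._grad_in_bucket, and _flatten_dense_tensors is the torch._utils helper the file already imports:

    import torch
    from torch._utils import _flatten_dense_tensors

    grad_in_bucket = {
        0: [torch.randn(4), torch.randn(4)],
        1: [torch.randn(4), torch.randn(4)],
    }

    # Before: flatten per rank (copy 1), then flatten the flats (copy 2).
    flat_old = _flatten_dense_tensors(
        [_flatten_dense_tensors(gl) for gl in grad_in_bucket.values()]
    )

    # After: collect every slice, then flatten a single time (one copy only).
    flat_grad = []
    for grad_list in grad_in_bucket.values():
        flat_grad.extend(grad_list)
    flat_new = _flatten_dense_tensors(flat_grad)

    assert torch.equal(flat_old, flat_new)  # identical result, half the copies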

