Commit 7297fa6

Remove unused parts in Megatron-LM code and add copyright notice (#110)
1 parent b7955ef commit 7297fa6

8 files changed: +23 -297 lines

cacheflow/model_executor/parallel_utils/__init__.py

Lines changed: 0 additions & 2 deletions

@@ -1,12 +1,10 @@
 import cacheflow.model_executor.parallel_utils.parallel_state
 import cacheflow.model_executor.parallel_utils.tensor_parallel
-import cacheflow.model_executor.parallel_utils.utils

 # Alias parallel_state as mpu, its legacy name
 mpu = parallel_state

 __all__ = [
     "parallel_state",
     "tensor_parallel",
-    "utils",
 ]
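
For orientation, a minimal sketch (not part of this commit) of how the mpu alias kept above is used; it assumes torch.distributed and the model parallel groups have already been initialized:

    from cacheflow.model_executor.parallel_utils import mpu

    # mpu is parallel_state under its legacy Megatron name.
    tp_rank = mpu.get_tensor_model_parallel_rank()
    tp_size = mpu.get_tensor_model_parallel_world_size()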

cacheflow/model_executor/parallel_utils/parallel_state.py

Lines changed: 2 additions & 25 deletions

@@ -1,12 +1,12 @@
+# Copyright 2023 The CacheFlow team.
+# Adapted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py
 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

 """Model and data parallel groups."""

 import torch
 from typing import Optional

-from .utils import GlobalMemoryBuffer
-
 # Intra-layer model parallel group that the current rank belongs to.
 _TENSOR_MODEL_PARALLEL_GROUP = None
 # Inter-layer model parallel group that the current rank belongs to.
@@ -44,9 +44,6 @@
 # rank when broadcasting weights from src to all other data parallel ranks
 _DATA_PARALLEL_GLOBAL_RANKS = None

-# Memory buffers to avoid dynamic memory allocation
-_GLOBAL_MEMORY_BUFFER = None
-
 _ALL_REDUCE_LAUNCHER: Optional['GraphAllReduce'] = None

 def initialize_model_parallel(
@@ -199,13 +196,6 @@ def initialize_model_parallel(
         if rank in ranks:
             _POSITION_EMBEDDING_GLOBAL_RANKS = position_embedding_ranks

-    # Initialize global memory buffer
-    # This isn't really "parallel state" but there isn't another good place to
-    # put this. If we end up with a more generic initialization of megatron-core
-    # we could stick it there
-    _set_global_memory_buffer()
-
-
 def initialize_all_reduce_launcher(
     max_num_tokens: int,
     hidden_size: int,
@@ -495,17 +485,6 @@ def get_data_parallel_rank():
     """Return my rank for the data parallel group."""
     return torch.distributed.get_rank(group=get_data_parallel_group())

-def _set_global_memory_buffer():
-    """Initialize global buffer"""
-    global _GLOBAL_MEMORY_BUFFER
-    assert _GLOBAL_MEMORY_BUFFER is None, 'global memory buffer is already initialized'
-    _GLOBAL_MEMORY_BUFFER = GlobalMemoryBuffer()
-
-def get_global_memory_buffer():
-    """Return the global GlobalMemoryBuffer object"""
-    assert _GLOBAL_MEMORY_BUFFER is not None, 'global memory buffer is not initialized'
-    return _GLOBAL_MEMORY_BUFFER
-
 def get_all_reduce_launcher() -> 'GraphAllReduce':
     assert _ALL_REDUCE_LAUNCHER is not None, 'all reduce launcher is not initialized'
     return _ALL_REDUCE_LAUNCHER
@@ -536,8 +515,6 @@ def destroy_model_parallel():
     _MPU_TENSOR_MODEL_PARALLEL_RANK = None
     global _MPU_PIPELINE_MODEL_PARALLEL_RANK
     _MPU_PIPELINE_MODEL_PARALLEL_RANK = None
-    global _GLOBAL_MEMORY_BUFFER
-    _GLOBAL_MEMORY_BUFFER = None


 class GraphAllReduce:
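
For context on the API that survives this cleanup, a minimal, illustrative lifecycle sketch using helpers that appear in the hunks above. It is not taken from the repository, and the keyword arguments passed to initialize_model_parallel are assumptions, since its full signature is not shown in this diff:

    import torch
    from cacheflow.model_executor.parallel_utils import parallel_state

    # Assumes a torchrun-style launch with one process per GPU.
    torch.distributed.init_process_group(backend="nccl")

    # Hypothetical sizes; the real parameter names are not shown in this diff.
    parallel_state.initialize_model_parallel(tensor_model_parallel_size=2,
                                             pipeline_model_parallel_size=1)

    tp_rank = parallel_state.get_tensor_model_parallel_rank()
    dp_rank = parallel_state.get_data_parallel_rank()

    # Tear down all groups; there is no longer a _GLOBAL_MEMORY_BUFFER to reset.
    parallel_state.destroy_model_parallel()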

cacheflow/model_executor/parallel_utils/tensor_parallel/__init__.py

Lines changed: 0 additions & 6 deletions

@@ -17,15 +17,12 @@
 )

 from .random import (
-    checkpoint,
     get_cuda_rng_tracker,
     model_parallel_cuda_manual_seed,
 )

 from .utils import (
     split_tensor_along_last_dim,
-    split_tensor_into_1d_equal_chunks,
-    gather_split_1d_tensor,
 )

 __all__ = [
@@ -45,11 +42,8 @@
     "scatter_to_tensor_model_parallel_region",
     "scatter_to_sequence_parallel_region",
     # random.py
-    "checkpoint",
     "get_cuda_rng_tracker",
     "model_parallel_cuda_manual_seed",
     # utils.py
     "split_tensor_along_last_dim",
-    "split_tensor_into_1d_equal_chunks",
-    "gather_split_1d_tensor",
 ]
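
A short illustrative check of the exports that remain after the trim. The snippet is not from the repository, and the second argument of split_tensor_along_last_dim (the number of partitions) is assumed from the Megatron-LM original rather than shown in this diff:

    import torch
    from cacheflow.model_executor.parallel_utils.tensor_parallel import (
        split_tensor_along_last_dim,
    )

    x = torch.arange(12.0).view(2, 6)
    # Split the last dimension into 3 equal views of shape (2, 2).
    chunks = split_tensor_along_last_dim(x, 3)
    assert all(chunk.shape == (2, 2) for chunk in chunks)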

cacheflow/model_executor/parallel_utils/tensor_parallel/layers.py

Lines changed: 2 additions & 0 deletions

@@ -1,3 +1,5 @@
+# Copyright 2023 The CacheFlow team.
+# Adapted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/tensor_parallel/layers.py
 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

 # Parts of the code here are adapted from PyTorch

cacheflow/model_executor/parallel_utils/tensor_parallel/mappings.py

Lines changed: 2 additions & 0 deletions

@@ -1,3 +1,5 @@
+# Copyright 2023 The CacheFlow team.
+# Adapted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/tensor_parallel/mappings.py
 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

 import torch

cacheflow/model_executor/parallel_utils/tensor_parallel/random.py

Lines changed: 2 additions & 91 deletions

@@ -1,3 +1,5 @@
+# Copyright 2023 The CacheFlow team.
+# Adapted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/tensor_parallel/random.py
 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

 # Parts of the code here are adapted from PyTorch
@@ -8,22 +10,11 @@
 import torch
 from torch import _C
 from torch.cuda import _lazy_call, device as device_ctx_manager
-from torch.utils.checkpoint import detach_variable

 from cacheflow.model_executor.parallel_utils.parallel_state import (
-    get_data_parallel_rank,
-    get_tensor_model_parallel_group,
     get_tensor_model_parallel_rank,
-    get_tensor_model_parallel_world_size,
 )

-from .utils import (
-    split_tensor_into_1d_equal_chunks,
-    gather_split_1d_tensor,
-)
-
-from cacheflow.model_executor.parallel_utils.utils import safely_set_viewless_tensor_data
-
 # Default name for the model parallel rng tracker.
 _MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'

@@ -171,83 +162,3 @@ def model_parallel_cuda_manual_seed(seed):
     # and model parallel state.
     _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
                                 tensor_model_parallel_seed)
-
-
-class CheckpointFunction(torch.autograd.Function):
-    """This function is adapted from torch.utils.checkpoint with
-    two main changes:
-    1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
-    2) the states in the model parallel tracker are also properly
-    tracked/set/reset.
-    """
-    @staticmethod
-    def forward(ctx, run_function, distribute_saved_activations, *args):
-        ctx.run_function = run_function
-        ctx.distribute_saved_activations \
-            = distribute_saved_activations
-
-        # Copy the rng states.
-        ctx.fwd_cpu_rng_state = torch.get_rng_state()
-        ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
-        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
-
-        with torch.no_grad():
-            outputs = run_function(*args)
-
-        # Divide hidden states across model parallel group and only keep
-        # the chunk corresponding to the current rank.
-        if distribute_saved_activations:
-            ctx.input_0_shape = args[0].data.shape
-            safely_set_viewless_tensor_data(
-                args[0],
-                split_tensor_into_1d_equal_chunks(args[0].data, new_buffer=True))
-
-        # Store everything.
-        ctx.save_for_backward(*args)
-
-        return outputs
-
-    @staticmethod
-    def backward(ctx, *args):
-        if not torch.autograd._is_checkpoint_valid():
-            raise RuntimeError("Checkpointing is not compatible with .grad(), "
-                               "please use .backward() if possible")
-        inputs = ctx.saved_tensors
-        if ctx.distribute_saved_activations:
-            safely_set_viewless_tensor_data(
-                inputs[0],
-                gather_split_1d_tensor(inputs[0].data).view(ctx.input_0_shape))
-
-        # Store the current states.
-        bwd_cpu_rng_state = torch.get_rng_state()
-        bwd_cuda_rng_state = torch.cuda.get_rng_state()
-        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
-
-        # Set the states to what it used to be before the forward pass.
-        torch.set_rng_state(ctx.fwd_cpu_rng_state)
-        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
-        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
-
-        # Compute the forward pass.
-        detached_inputs = detach_variable(inputs)
-        with torch.enable_grad():
-            outputs = ctx.run_function(*detached_inputs)
-
-        # Set the states back to what it was at the start of this function.
-        torch.set_rng_state(bwd_cpu_rng_state)
-        _set_cuda_rng_state(bwd_cuda_rng_state)
-        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
-
-        if isinstance(outputs, torch.Tensor):
-            outputs = (outputs,)
-        torch.autograd.backward(outputs, args)
-        grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp
-                      for inp in detached_inputs)
-        return (None, None) + grads
-
-
-def checkpoint(function, distribute_saved_activations, *args):
-    """Checkpoint a model or part of the model.
-    This has been directly copied from torch.utils.checkpoint."""
-    return CheckpointFunction.apply(function,
-                                    distribute_saved_activations, *args)
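
Because the checkpoint helper above is deleted outright, here is a hedged sketch of how a former call site maps onto plain PyTorch. The toy layer is hypothetical, and torch.utils.checkpoint only covers the recomputation part; it does not save and restore the model-parallel RNG tracker state that the removed variant handled:

    import torch
    from torch.utils.checkpoint import checkpoint as torch_checkpoint

    layer = torch.nn.Linear(16, 16).cuda()
    x = torch.randn(4, 16, device="cuda", requires_grad=True)

    # Before this commit (removed helper, distribute_saved_activations=False):
    #     out = checkpoint(layer, False, x)
    # Plain-PyTorch stand-in for the recomputation:
    out = torch_checkpoint(layer, x)
    out.sum().backward()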

cacheflow/model_executor/parallel_utils/tensor_parallel/utils.py

Lines changed: 15 additions & 53 deletions

@@ -1,10 +1,23 @@
+# Copyright 2023 The CacheFlow team.
+# Adapted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/tensor_parallel/utils.py
 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

 import torch
 from typing import List, Sequence

-from cacheflow.model_executor.parallel_utils.utils import divide
-from cacheflow.model_executor.parallel_utils import parallel_state
+def ensure_divisibility(numerator, denominator):
+    """Ensure that numerator is divisible by the denominator."""
+    assert numerator % denominator == 0, "{} is not divisible by {}".format(
+        numerator, denominator
+    )
+
+
+def divide(numerator, denominator):
+    """Ensure that numerator is divisible by the denominator and return
+    the division value."""
+    ensure_divisibility(numerator, denominator)
+    return numerator // denominator
+

 def split_tensor_along_last_dim(
     tensor: torch.Tensor,
@@ -33,57 +46,6 @@ def split_tensor_along_last_dim(

     return tensor_list

-def split_tensor_into_1d_equal_chunks(tensor, new_buffer=False):
-    """ Break a tensor into equal 1D chunks across tensor parallel ranks.
-
-    Returns a Tensor or View with this rank's portion of the data.
-
-    Arguments:
-        tensor: The tensor to split
-
-    Keyword Arguments:
-        new_buffer (bool): If True, returns a new Tensor.
-                           If False, returns a view into the existing Tensor.
-                           Default is False
-
-    """
-    partition_size = torch.numel(tensor) // \
-        parallel_state.get_tensor_model_parallel_world_size()
-    start_index = partition_size * parallel_state.get_tensor_model_parallel_rank()
-    end_index = start_index + partition_size
-    if new_buffer:
-        data = torch.empty(partition_size, dtype=tensor.dtype,
-                           device=torch.cuda.current_device(),
-                           requires_grad=False)
-        data.copy_(tensor.view(-1)[start_index:end_index])
-    else:
-        data = tensor.view(-1)[start_index:end_index]
-    return data
-
-
-def gather_split_1d_tensor(tensor):
-    """ Opposite of split_tensor_into_1d_equal_chunks. Gather values from tensor
-    model parallel ranks.
-
-    Returns a new Tensor with the gathered data.
-
-    Arguments:
-        tensor: A Tensor or view of this rank's portion of the data.
-    """
-    numel_gathered = torch.numel(tensor) * \
-        parallel_state.get_tensor_model_parallel_world_size()
-    gathered = torch.empty(numel_gathered, dtype=tensor.dtype,
-                           device=torch.cuda.current_device(),
-                           requires_grad=False)
-    # TODO: This API is experimental in pytorch (as of Feb 2022) and
-    # this might break in future pytorch releases. We chose this API
-    # as opposed to torch.distributed.all_gather for efficiency reasons.
-    # This API calls directly NCCL all-gather versus the former does
-    # internal copies and can potentially cause slow down.
-    torch.distributed._all_gather_base(gathered, tensor,
-                                       group=parallel_state.get_tensor_model_parallel_group())
-    return gathered
-

 class VocabUtility:
     """ Split the vocabulary into `world_size` chunks and return the first
