Commit c9d5b6d
Replace FlashAttention with xformers (#70)
1 parent 189ae23

13 files changed: +87 -131 lines changed

README.md

Lines changed: 1 addition & 5 deletions
@@ -3,11 +3,7 @@
 ## Installation

 ```bash
-pip install psutil numpy ray torch
-pip install git+https://github.com/huggingface/transformers # Required for LLaMA.
-pip install sentencepiece # Required for LlamaTokenizer.
-pip install ninja # To parallelize the compilation of flash-attn.
-pip install flash-attn # This may take up to 10 mins.
+pip install ninja psutil numpy sentencepiece ray torch transformers xformers
 pip install -e .
 ```
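
The new one-line requirement list drops the flash-attn build step entirely, since xformers ships prebuilt kernels. As a quick post-install smoke test (not part of this commit; assumes a CUDA GPU and half precision), something like the following should run without error:

```python
# Hypothetical smoke test: call xformers' public forward op on a tiny
# half-precision batch of shape [batch, seq_len, num_heads, head_size].
import torch
from xformers import ops as xops

q = torch.randn(1, 16, 4, 64, dtype=torch.float16, device='cuda')
out = xops.memory_efficient_attention_forward(q, q, q, p=0.0)
print(out.shape)  # torch.Size([1, 16, 4, 64])
```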

cacheflow/master/server.py

Lines changed: 1 addition & 1 deletion
@@ -213,7 +213,7 @@ def add_server_arguments(parser: argparse.ArgumentParser):
     parser.add_argument('--use-np-cache', action='store_true',
                         help='save a numpy copy of model weights for faster loading')
     parser.add_argument('--use-dummy-weights', action='store_true', help='use dummy values for model weights')
-    # NOTE(woosuk): FlashAttention does not support float32.
+    # TODO(woosuk): Support FP32 for debugging.
     parser.add_argument('--dtype', type=str, default='default', choices=['default', 'half', 'bfloat16'],
                         help=('data type for model weights and activations. '
                               'The "default" option will use FP16 precision '

cacheflow/models/attention.py

Lines changed: 16 additions & 38 deletions
@@ -1,8 +1,8 @@
 from typing import Optional

-from flash_attn.flash_attn_interface import _flash_attn_forward
 import torch
 import torch.nn as nn
+from xformers import ops as xops

 from cacheflow import attention_ops
 from cacheflow import cache_ops
@@ -15,39 +15,29 @@ class GPTCacheFlowAttention(nn.Module):
     def __init__(self, scale: float) -> None:
         super().__init__()
         self.scale = float(scale)
+        self.attn_op = xops.fmha.cutlass.FwOp()

     def multi_query_kv_attention(
         self,
         output: torch.Tensor,  # [num_prompt_tokens, num_heads, head_size]
         query: torch.Tensor,   # [num_prompt_tokens, num_heads, head_size]
         key: torch.Tensor,     # [num_prompt_tokens, num_heads, head_size]
         value: torch.Tensor,   # [num_prompt_tokens, num_heads, head_size]
-        cumulative_prompt_lens: torch.Tensor,  # [num_prompts + 1]
-        max_prompt_len: int,
+        attn_bias: xops.AttentionBias,
     ) -> None:
-        if query.dtype == torch.float:
-            raise ValueError('The float data type is not supported by '
-                             'FlashAttention. Use the half data type instead.')
-        head_size = query.shape[-1]
-        if head_size > 128:
-            raise ValueError('FlashAttention does not support head_size > 128.')
-
-        # Directly call FlashAttention's internal function to avoid allocating
-        # a new tensor for the output.
-        _flash_attn_forward(
-            query,
-            key,
-            value,
-            output,
-            cumulative_prompt_lens,
-            cumulative_prompt_lens,
-            max_prompt_len,
-            max_prompt_len,
-            dropout_p=0.0,
-            softmax_scale=self.scale,
-            causal=True,
-            return_softmax=False,
+        # TODO(woosuk): The unsqueeze op may incur some CPU overhead. Optimize.
+        out = xops.memory_efficient_attention_forward(
+            query.unsqueeze(0),
+            key.unsqueeze(0),
+            value.unsqueeze(0),
+            attn_bias=attn_bias,
+            p=0.0,
+            scale=self.scale,
+            op=self.attn_op,
         )
+        # TODO(woosuk): Unnecessary copy. Optimize.
+        output.copy_(out.squeeze(0))
+        return output

     def single_query_cached_kv_attention(
         self,
@@ -109,8 +99,7 @@ def forward(
             query[:num_prompt_tokens],
             key[:num_prompt_tokens],
             value[:num_prompt_tokens],
-            input_metadata.cumulative_prompt_lens,
-            input_metadata.max_prompt_len,
+            input_metadata.attn_bias,
         )

         # Wait until the cache op is done.
@@ -143,13 +132,6 @@ def forward(
         return output.view(-1, num_heads * head_size)


-class OPTCacheFlowAttention(GPTCacheFlowAttention):
-    """OPT uses the same attention mechanism as GPT."""
-
-    def __init__(self, scale: float) -> None:
-        super().__init__(scale)
-
-
 class GPTNeoXCacheFlowAttention(GPTCacheFlowAttention):
     """Attention with GPT-NeoX style rotary embedding."""

@@ -207,7 +189,3 @@ def forward(
             input_metadata,
             cache_event,
         )
-
-
-class LlamaCacheFlowAttention(GPTNeoXCacheFlowAttention):
-    """LLaMA uses the GPT-NeoX style rotary embedding."""

cacheflow/models/input_metadata.py

Lines changed: 9 additions & 12 deletions
@@ -1,6 +1,7 @@
 from typing import List, Dict, Tuple

 import torch
+from xformers.ops.fmha.attn_bias import BlockDiagonalCausalMask

 from cacheflow.sampling_params import SamplingParams

@@ -12,7 +13,6 @@ def __init__(
         seq_groups: List[Tuple[List[int], SamplingParams]],
         seq_logprobs: Dict[int, float],  # Seq id -> cumulative logprobs.
         prompt_lens: List[int],
-        cumulative_prompt_lens: torch.Tensor,
         slot_mapping: torch.Tensor,
         context_lens: torch.Tensor,
         max_context_len: int,
@@ -21,15 +21,14 @@ def __init__(
         self.seq_groups = seq_groups
         self.seq_logprobs = seq_logprobs
         self.prompt_lens = prompt_lens
-        self.cumulative_prompt_lens = cumulative_prompt_lens
         self.slot_mapping = slot_mapping
         self.context_lens = context_lens
         self.max_context_len = max_context_len
         self.block_tables = block_tables

+        self.attn_bias = BlockDiagonalCausalMask.from_seqlens(prompt_lens)
         self.num_prompts = len(prompt_lens)
         self.num_prompt_tokens = sum(prompt_lens)
-        self.max_prompt_len = max(prompt_lens) if prompt_lens else 0
         self.num_generation_tokens = context_lens.shape[0]
         self.num_valid_tokens = slot_mapping.shape[0]
         if block_tables.numel() > 0:
@@ -41,15 +40,13 @@ def __init__(

     def __repr__(self) -> str:
         return (f'InputMetadata('
-                f'num_prompts={self.num_prompts}, '
-                f'num_prompt_tokens={self.num_prompt_tokens}, '
-                f'max_prompt_len={self.max_prompt_len}, '
-                f'num_generation_tokens={self.num_generation_tokens}, '
                 f'num_valid_tokens={self.num_valid_tokens}, '
-                f'max_num_blocks_per_seq={self.max_num_blocks_per_seq}, '
-                f'max_context_len={self.max_context_len}), '
+                f'num_prompt_tokens={self.num_prompt_tokens}, '
+                f'num_prompts={self.num_prompts}, '
                 f'prompt_lens={self.prompt_lens}, '
-                f'cumulative_prompt_lens={self.cumulative_prompt_lens}, '
-                f'slot_mapping={self.slot_mapping}, '
+                f'num_generation_tokens={self.num_generation_tokens}, '
                 f'context_lens={self.context_lens}, '
-                f'block_tables={self.block_tables})')
+                f'max_context_len={self.max_context_len}), '
+                f'max_num_blocks_per_seq={self.max_num_blocks_per_seq}, '
+                f'block_tables={self.block_tables}), '
+                f'slot_mapping={self.slot_mapping}')
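
The net effect of these changes is that InputMetadata no longer carries the cumulative prompt lengths FlashAttention required, because the bias built from prompt_lens encodes the same boundary information. A small illustrative comparison (not from the commit):

```python
import itertools
from xformers.ops.fmha.attn_bias import BlockDiagonalCausalMask

prompt_lens = [5, 3, 7]

# What the old code shipped to FlashAttention: offsets [0, 5, 8, 15].
cumulative_prompt_lens = [0] + list(itertools.accumulate(prompt_lens))

# What InputMetadata now stores instead; xformers derives the per-prompt
# offsets internally from the same lengths.
attn_bias = BlockDiagonalCausalMask.from_seqlens(prompt_lens)
```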

cacheflow/models/llama.py

Lines changed: 2 additions & 2 deletions
@@ -7,7 +7,7 @@

 from cacheflow.models import InputMetadata
 from cacheflow.models.activation import SiluAndMul
-from cacheflow.models.attention import LlamaCacheFlowAttention
+from cacheflow.models.attention import GPTNeoXCacheFlowAttention
 from cacheflow.models.layernorm import RMSNorm
 from cacheflow.models.sample import Sampler
 from cacheflow.models.utils import (hf_model_weights_iterator,
@@ -79,7 +79,7 @@ def __init__(
             input_is_parallel=True,
             perform_initialization=False,
         )
-        self.attn = LlamaCacheFlowAttention(self.scaling, self.head_dim)
+        self.attn = GPTNeoXCacheFlowAttention(self.scaling, self.head_dim)

     def forward(
         self,

cacheflow/models/memory_analyzer.py

Lines changed: 6 additions & 6 deletions
@@ -202,8 +202,8 @@ def get_max_act_size(
         # estimating
         # 1) the maximum activation tensor size during inference
         # 2) the residual tensor size during inference
-        # Here, we assume that FlashAttention is used and
-        # thus the attention maps are never materialized in GPU DRAM.
+        # Here, we assume that we use memory-efficient attention which
+        # does not materialize the attention maps in GPU DRAM.
         residual = max_num_batched_tokens * self.hidden_size
         qkv = 3 * (max_num_batched_tokens * self.hidden_size) // self.tensor_parallel_size
         ffn = max_num_batched_tokens * self.ffn_size // self.tensor_parallel_size
@@ -277,8 +277,8 @@ def get_max_act_size(
         # estimating
         # 1) the maximum activation tensor size during inference
         # 2) the residual tensor size during inference
-        # Here, we assume that FlashAttention is used and
-        # thus the attention maps are never materialized in GPU DRAM.
+        # Here, we assume that we use memory-efficient attention which
+        # does not materialize the attention maps in GPU DRAM.
         residual = max_num_batched_tokens * self.hidden_size
         qkv = 3 * (max_num_batched_tokens * self.hidden_size) // self.tensor_parallel_size
         ffn = 2 * (max_num_batched_tokens * self.ffn_size) // self.tensor_parallel_size
@@ -353,8 +353,8 @@ def get_max_act_size(
         # estimating
         # 1) the maximum activation tensor size during inference
         # 2) the residual tensor size during inference
-        # Here, we assume that FlashAttention is used and
-        # thus the attention maps are never materialized in GPU DRAM.
+        # Here, we assume that we use memory-efficient attention which
+        # does not materialize the attention maps in GPU DRAM.
         residual = max_num_batched_tokens * self.hidden_size
         qkv = 3 * (max_num_batched_tokens * self.hidden_size) // self.tensor_parallel_size
         ffn = 2 * (max_num_batched_tokens * self.ffn_size) // self.tensor_parallel_size
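
Only the comment changes here; the estimate itself is unchanged. Because the memory-efficient kernel never materializes the [num_tokens, num_tokens] attention map, only the residual, QKV, and FFN activations are counted. A worked example with illustrative numbers (not from the commit; the FFN factor differs across the model classes in this file):

```python
max_num_batched_tokens = 2560
hidden_size = 4096                  # e.g. a 7B-class model
ffn_size = 4 * hidden_size
tensor_parallel_size = 1

residual = max_num_batched_tokens * hidden_size
qkv = 3 * (max_num_batched_tokens * hidden_size) // tensor_parallel_size
ffn = 2 * (max_num_batched_tokens * ffn_size) // tensor_parallel_size

# At FP16 (2 bytes per element) this comes to roughly 240 MiB.
total_bytes = 2 * (residual + qkv + ffn)
print(f'{total_bytes / 1024**2:.0f} MiB')
```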

cacheflow/models/opt.py

Lines changed: 2 additions & 2 deletions
@@ -6,7 +6,7 @@
 from transformers import OPTConfig

 from cacheflow.models import InputMetadata
-from cacheflow.models.attention import OPTCacheFlowAttention
+from cacheflow.models.attention import GPTCacheFlowAttention
 from cacheflow.models.sample import Sampler
 from cacheflow.models.utils import (hf_model_weights_iterator,
                                     load_tensor_parallel_weights)
@@ -55,7 +55,7 @@ def __init__(
         self.out_proj = RowParallelLinear(embed_dim, embed_dim, bias=bias,
                                           input_is_parallel=True,
                                           perform_initialization=False)
-        self.attn = OPTCacheFlowAttention(scale=self.scaling)
+        self.attn = GPTCacheFlowAttention(scale=self.scaling)

     def forward(
         self,

cacheflow/worker/worker.py

Lines changed: 0 additions & 8 deletions
@@ -136,11 +136,6 @@ def prepare_inputs(
                 slot = block_number * self.block_size + block_offset
                 slot_mapping.append(slot)

-        cumulative_prompt_lens: List[int] = [0]
-        for prompt_len in prompt_lens:
-            cumulative_prompt_lens.append(
-                cumulative_prompt_lens[-1] + prompt_len)
-
         # Add generation tokens.
         max_context_len = 0
         max_num_blocks_per_seq = 0
@@ -196,14 +191,11 @@ def prepare_inputs(
             for block_table in generation_block_tables]
         block_tables_tensor = torch.tensor(
             padded_block_tables, dtype=torch.int, device='cuda')
-        cumulative_prompt_lens_tensor = torch.tensor(
-            cumulative_prompt_lens, dtype=torch.int, device='cuda')

         input_metadata = InputMetadata(
             seq_groups=seq_groups,
             seq_logprobs=seq_logprobs,
             prompt_lens=prompt_lens,
-            cumulative_prompt_lens=cumulative_prompt_lens_tensor,
             slot_mapping=slot_mapping_tensor,
             context_lens=context_lens_tensor,
             max_context_len=max_context_len,

tests/kernels/activation.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ def test_silu_and_mul(


 if __name__ == '__main__':
-    for dtype in [torch.half, torch.float]:
+    for dtype in [torch.half, torch.bfloat16, torch.float]:
         for num_tokens in [7, 83, 2048]:
             for d in [512, 4096, 13824]:
                 print(f'Testing dtype={dtype}, num_tokens={num_tokens}, d={d}')
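
The dtype sweep now also covers bfloat16. For reference, a plain-PyTorch version of the silu-and-mul operation this test exercises might look like the following (a hypothetical reference implementation, not the test's actual body):

```python
import torch
import torch.nn.functional as F

def ref_silu_and_mul(x: torch.Tensor) -> torch.Tensor:
    # x: [num_tokens, 2 * d] -> [num_tokens, d]
    d = x.shape[-1] // 2
    return F.silu(x[..., :d]) * x[..., d:]

x = torch.randn(7, 2 * 512, dtype=torch.bfloat16, device='cuda')
print(ref_silu_and_mul(x).shape)  # torch.Size([7, 512])
```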
