
Commit b3caeb8

[ROCm][AITER] Enable fp8 kv cache on rocm aiter backend. (#20295)
Signed-off-by: fsx950223 <fsx950223@outlook.com>
Signed-off-by: amd-ruitang3 <Rui.Tang2@amd.com>
Co-authored-by: amd-ruitang3 <Rui.Tang2@amd.com>
1 parent eab2f39 commit b3caeb8
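
For context, a minimal sketch of how this feature might be exercised through the public vLLM API. This is not part of the diff: the VLLM_ROCM_USE_AITER toggle and the kv_cache_dtype="fp8" engine argument are assumed from vLLM's documented configuration surface, and the model name is only a placeholder.

# Hypothetical usage sketch (not part of this commit).
import os

# Opt into the ROCm AITER kernels before vLLM initializes its backends.
os.environ["VLLM_ROCM_USE_AITER"] = "1"

from vllm import LLM, SamplingParams

# kv_cache_dtype="fp8" stores K/V in 8-bit float; this commit enables the
# ROCm AITER attention backend to consume such a cache.
llm = LLM(model="facebook/opt-125m", kv_cache_dtype="fp8")
out = llm.generate(["Hello, my name is"], SamplingParams(max_tokens=16))
print(out[0].outputs[0].text)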

File tree

2 files changed: +320 -96 lines

Lines changed: 191 additions & 0 deletions
@@ -0,0 +1,191 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from typing import Optional

import pytest
import torch

import vllm.v1.attention.backends.rocm_aiter_fa  # noqa: F401
from vllm.platforms import current_platform

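# (num_query_heads, num_kv_heads) pairs: (4, 4) is plain MHA, while (8, 2)
# and (16, 2) exercise grouped-query attention. With QDTYPES = [None], only
# the unquantized path runs here; an fp8 entry would exercise the quantized
# branch in the test body.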
NUM_HEADS = [(4, 4), (8, 2), (16, 2)]
HEAD_SIZES = [128, 256]
BLOCK_SIZES = [16, 32]
DTYPES = [torch.float16, torch.bfloat16]
QDTYPES = [None]
# one value large enough to test overflow in index calculation.
# one value small enough to test the schema op check
NUM_BLOCKS = [32768, 2048]

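# Reference implementation: unfused paged attention in plain PyTorch,
# computed one sequence at a time and used as ground truth for the kernel.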
def ref_paged_attn(
    query: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    query_lens: list[int],
    kv_lens: list[int],
    block_tables: torch.Tensor,
    scale: float,
    sliding_window: Optional[int] = None,
    soft_cap: Optional[float] = None,
) -> torch.Tensor:
    num_seqs = len(query_lens)
    block_tables = block_tables.cpu().numpy()
    _, block_size, num_kv_heads, head_size = key_cache.shape

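    # Walk the batch: slice each sequence's queries, gather its K/V pages
    # through the block table, and run plain softmax attention.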
    outputs: list[torch.Tensor] = []
    start_idx = 0
    for i in range(num_seqs):
        query_len = query_lens[i]
        kv_len = kv_lens[i]
        q = query[start_idx:start_idx + query_len]
        q *= scale

        num_kv_blocks = (kv_len + block_size - 1) // block_size
        block_indices = block_tables[i, :num_kv_blocks]

        k = key_cache[block_indices].view(-1, num_kv_heads, head_size)
        k = k[:kv_len]
        v = value_cache[block_indices].view(-1, num_kv_heads, head_size)
        v = v[:kv_len]

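        # For GQA, repeat K/V heads to match the query head count, then
        # build the causal (and optional sliding-window) mask.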
        if q.shape[1] != k.shape[1]:
            k = torch.repeat_interleave(k, q.shape[1] // k.shape[1], dim=1)
            v = torch.repeat_interleave(v, q.shape[1] // v.shape[1], dim=1)
        attn = torch.einsum("qhd,khd->hqk", q, k).float()
        empty_mask = torch.ones(query_len, kv_len)
        mask = torch.triu(empty_mask, diagonal=kv_len - query_len + 1).bool()
        if sliding_window is not None:
            sliding_window_mask = torch.triu(
                empty_mask,
                diagonal=kv_len - (query_len + sliding_window) + 1,
            ).bool().logical_not()
            mask |= sliding_window_mask
        if soft_cap is not None:
            attn = soft_cap * torch.tanh(attn / soft_cap)
        attn.masked_fill_(mask, float("-inf"))
        attn = torch.softmax(attn, dim=-1).to(v.dtype)
        out = torch.einsum("hqk,khd->qhd", attn, v)

        outputs.append(out)
        start_idx += query_len

    return torch.cat(outputs, dim=0)

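# End-to-end check: run the fused varlen attention custom op over a random
# paged KV cache and compare it against ref_paged_attn.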
@pytest.mark.skipif(not current_platform.is_rocm(),
                    reason="Only ROCm is supported")
@pytest.mark.parametrize("seq_lens",
                         [[(10, 1328), (5, 18), (129, 463)],
                          [(8, 523), (24, 37), (3, 2011)]])
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("sliding_window", [None, 256])
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("soft_cap", [None])
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("q_dtype", QDTYPES)
@torch.inference_mode()
def test_varlen_with_paged_kv(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: Optional[int],
    dtype: torch.dtype,
    block_size: int,
    soft_cap: Optional[float],
    num_blocks: int,
    q_dtype: Optional[torch.dtype],
) -> None:
    torch.set_default_device("cuda")
    current_platform.seed_everything(0)
    num_seqs = len(seq_lens)
    query_lens = [x[0] for x in seq_lens]
    kv_lens = [x[1] for x in seq_lens]
    num_query_heads = num_heads[0]
    num_kv_heads = num_heads[1]
    assert num_query_heads % num_kv_heads == 0
    max_query_len = max(query_lens)
    max_kv_len = max(kv_lens)
    window_size = ((sliding_window - 1, 0) if sliding_window is not None else
                   (-1, -1))
    scale = head_size**-0.5

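    # Random inputs: a flattened varlen query, a paged KV cache, and the
    # cumulative sequence-length tensors describing the batch layout.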
    query = torch.randn(sum(query_lens),
                        num_query_heads,
                        head_size,
                        dtype=dtype)
    key_cache = torch.randn(num_blocks,
                            block_size,
                            num_kv_heads,
                            head_size,
                            dtype=dtype)
    value_cache = torch.randn_like(key_cache)
    cu_query_lens = torch.tensor([0] + query_lens,
                                 dtype=torch.int32).cumsum(dim=0,
                                                           dtype=torch.int32)

    cu_seq_lens = torch.tensor([0] + kv_lens,
                               dtype=torch.int32).cumsum(dim=0,
                                                         dtype=torch.int32)
    kv_lens = torch.tensor(kv_lens, dtype=torch.int32)

    max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
    block_tables = torch.randint(0,
                                 num_blocks,
                                 (num_seqs, max_num_blocks_per_seq),
                                 dtype=torch.int32)

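    # fp8 path: when q_dtype is set, cast Q and the KV cache down and pass
    # per-sequence, per-head descale factors to the kernel.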
    output = torch.empty_like(query)

    maybe_quantized_query = query
    maybe_quantized_key_cache = key_cache
    maybe_quantized_value_cache = value_cache
    k_descale = None
    v_descale = None
    if q_dtype is not None:
        # QKV are drawn from N(0, 1): no need for a fp8 scaling factor
        maybe_quantized_query = query.to(q_dtype)
        maybe_quantized_key_cache = key_cache.to(q_dtype)
        maybe_quantized_value_cache = value_cache.to(q_dtype)

        scale_shape = (num_seqs, num_kv_heads)
        k_descale = torch.ones(scale_shape, dtype=torch.float32)
        v_descale = torch.ones(scale_shape, dtype=torch.float32)

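    # torch.ops.vllm.flash_attn_varlen_func is the custom op registered as a
    # side effect of the rocm_aiter_fa import at the top of the file (hence
    # the noqa: F401); it writes the attention result into `output`.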
    torch.ops.vllm.flash_attn_varlen_func(
        maybe_quantized_query,
        maybe_quantized_key_cache,
        maybe_quantized_value_cache,
        out=output,
        cu_seqlens_q=cu_query_lens,
        max_seqlen_q=max_query_len,
        max_seqlen_k=max_kv_len,
        softmax_scale=scale,
        alibi_slopes=None,
        window_size=window_size,
        block_table=block_tables,
        cu_seqlens_k=cu_seq_lens,
        k_scale=k_descale,
        v_scale=v_descale,
    )

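    # Compare against the full-precision reference, loosening tolerances
    # when the quantized path was exercised.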
    ref_output = ref_paged_attn(
        query=query,
        key_cache=key_cache,
        value_cache=value_cache,
        query_lens=query_lens,
        kv_lens=kv_lens,
        block_tables=block_tables,
        scale=scale,
        sliding_window=sliding_window,
        soft_cap=soft_cap,
    )

    atol, rtol = 2e-2, 2e-2
    if q_dtype is not None:
        atol, rtol = 1.5e-1, 1.5e-1
    torch.testing.assert_close(
        output, ref_output, atol=atol, rtol=rtol,
        msg=f"max diff: {torch.max(torch.abs(output - ref_output))}")
