
Commit d0feea3

[Kernel] optimize performance of gptq marlin kernel when n is small (#14138)
Signed-off-by: Jinzhen Lin <linjinzhen@hotmail.com>
1 parent 58abe35 · commit d0feea3

6 files changed: +99 −24 lines changed

csrc/quantization/gptq_marlin/gptq_marlin.cu

Lines changed: 46 additions & 16 deletions
@@ -538,6 +538,7 @@ __global__ void Marlin(
     int prob_n,           // output dimension n
     int prob_k,           // reduction dimension k
     int* locks,           // extra global storage for barrier synchronization
+    bool use_atomic_add,  // whether to use atomic add to reduce
     bool use_fp32_reduce  // whether to use fp32 global reduce
 ) {
   // Each threadblock processes one "stripe" of the B matrix with (roughly) the
@@ -1542,7 +1543,17 @@ __global__ void Marlin(
          i < div_ceil(16 * thread_m_blocks, threads / (2 * thread_n_blocks));
          i++) {
       if (c_gl_wr < c_gl_wr_end) {
-        C[c_gl_wr] = sh_red[c_sh_rd];
+        if (use_atomic_add && slice_count > 1) {
+          scalar_t2* C_half2 = reinterpret_cast<scalar_t2*>(&C[c_gl_wr]);
+          scalar_t2* sh_red_half2 =
+              reinterpret_cast<scalar_t2*>(&sh_red[c_sh_rd]);
+  #pragma unroll
+          for (int a = 0; a < 4; a++) {
+            atomicAdd(&C_half2[a], sh_red_half2[a]);
+          }
+        } else {
+          C[c_gl_wr] = sh_red[c_sh_rd];
+        }
         c_gl_wr += c_gl_wr_delta;
         c_sh_rd += c_sh_rd_delta;
       }
@@ -1644,7 +1655,7 @@ __global__ void Marlin(
         }
         cp_async_fence();
       } else {
-        if (last) {
+        if (last || use_atomic_add) {
           if (s_sh_wr_pred) {
             cp_async4(&sh_s[s_sh_wr], &scales_ptr[s_gl_rd]);
           }
@@ -1664,7 +1675,7 @@ __global__ void Marlin(
       }
 
     } else {
-      if (last) {
+      if (last || use_atomic_add) {
         cp_async_wait<0>();
         __syncthreads();
         if (threadIdx.x / 32 < thread_n_blocks / 4) {
@@ -1703,8 +1714,8 @@ __global__ void Marlin(
       }
     }
 
-    if (slice_count > 1) {  // only globally reduce if there is more than one
-                            // block in a slice
+    if (slice_count > 1 && !use_atomic_add) {
+      // only globally reduce if there is more than one block in a slice
       barrier_acquire(&locks[slice_col], slice_idx);
       if (use_fp32_reduce) {
         global_reduce_fp32(slice_idx == 0, last);
@@ -1713,7 +1724,8 @@ __global__ void Marlin(
       }
       barrier_release(&locks[slice_col], last);
     }
-    if (last)  // only the last block in a slice actually writes the result
+    if (last || use_atomic_add)
+      // only the last block in a slice actually writes the result
       write_result();
     slice_row = 0;
     slice_col_par++;
@@ -1768,7 +1780,8 @@ __global__ void Marlin(
             HAS_ZP, GROUP_BLOCKS, IS_ZP_FLOAT>                            \
         <<<blocks, NUM_THREADS, max_shared_mem, stream>>>(                 \
             A_ptr, B_ptr, C_ptr, C_tmp_ptr, s_ptr, zp_ptr, g_idx_ptr,      \
-            num_groups, prob_m, prob_n, prob_k, locks, use_fp32_reduce);   \
+            num_groups, prob_m, prob_n, prob_k, locks, use_atomic_add,     \
+            use_fp32_reduce);                                              \
   }                                                                        \
 }
@@ -2062,7 +2075,8 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* s,
                vllm::ScalarType const& q_type, bool has_act_order,
                bool is_k_full, bool has_zp, int num_groups, int group_size,
                int dev, cudaStream_t stream, int thread_k, int thread_n,
-               int sms, int max_par, bool use_fp32_reduce, bool is_zp_float) {
+               int sms, int max_par, bool use_atomic_add, bool use_fp32_reduce,
+               bool is_zp_float) {
   if (has_zp) {
     TORCH_CHECK(
         q_type == vllm::kU4 || q_type == vllm::kU8,
@@ -2243,7 +2257,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                                torch::Tensor& workspace,
                                vllm::ScalarTypeId const& b_q_type_id,
                                int64_t size_m, int64_t size_n, int64_t size_k,
-                               bool is_k_full, bool has_zp,
+                               bool is_k_full, bool has_zp, bool use_atomic_add,
                                bool use_fp32_reduce, bool is_zp_float) {
   vllm::ScalarType const b_q_type = vllm::ScalarType::from_id(b_q_type_id);
   if (has_zp) {
@@ -2306,19 +2320,34 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
   // Alloc buffers
   const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
   auto options = torch::TensorOptions().dtype(a.dtype()).device(a.device());
-  torch::Tensor c = torch::empty({size_m, size_n}, options);
-  torch::Tensor a_tmp = torch::empty({size_m, size_k}, options);
+  torch::Tensor c;
+  if (use_atomic_add) {
+    c = torch::zeros({size_m, size_n}, options);
+  } else {
+    c = torch::empty({size_m, size_n}, options);
+  }
+
+  torch::Tensor a_tmp;
+  bool has_act_order = g_idx.size(0) != 0;
+  if (has_act_order) {
+    a_tmp = torch::empty({size_m, size_k}, options);
+  } else {
+    a_tmp = torch::empty({0}, options);
+  }
 
   // Alloc C tmp buffer that is going to be used for the global reduce
+  torch::Tensor c_tmp;
   int reduce_max_m = marlin::determine_reduce_max_m(size_m, marlin::max_par);
   int reduce_n = size_n;
   auto options_fp32 =
       torch::TensorOptions().dtype(at::kFloat).device(a.device());
-  if (!use_fp32_reduce) {
+  if (use_fp32_reduce) {
+    c_tmp = torch::empty({reduce_max_m, reduce_n}, options_fp32);
+  } else {
     reduce_max_m = 0;
     reduce_n = 0;
+    c_tmp = torch::empty({0}, options_fp32);
   }
-  torch::Tensor c_tmp = torch::empty({reduce_max_m, reduce_n}, options_fp32);
 
   // thread_k: `k` size of a thread_tile in `weights` (can usually be left as
   // auto -1)
@@ -2339,7 +2368,6 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
   // Detect groupsize and act_order
   int num_groups = -1;
   int group_size = -1;
-  bool has_act_order = g_idx.size(0) != 0;
 
   int rank = b_scales.sizes().size();
   TORCH_CHECK(rank == 2, "b_scales rank = ", rank, " is not 2");
@@ -2407,7 +2435,8 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
         a_tmp.data_ptr<at::Half>(), size_m, size_n, size_k,
         workspace.data_ptr(), b_q_type, has_act_order, is_k_full, has_zp,
         num_groups, group_size, dev, at::cuda::getCurrentCUDAStream(dev),
-        thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce, is_zp_float);
+        thread_k, thread_n, sms, marlin::max_par, use_atomic_add,
+        use_fp32_reduce, is_zp_float);
   } else if (a.scalar_type() == at::ScalarType::BFloat16) {
     marlin::marlin_mm<nv_bfloat16>(
         a.data_ptr<at::BFloat16>(), b_q_weight.data_ptr(),
@@ -2416,7 +2445,8 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
         perm.data_ptr(), a_tmp.data_ptr<at::BFloat16>(), size_m, size_n, size_k,
         workspace.data_ptr(), b_q_type, has_act_order, is_k_full, has_zp,
         num_groups, group_size, dev, at::cuda::getCurrentCUDAStream(dev),
-        thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce, is_zp_float);
+        thread_k, thread_n, sms, marlin::max_par, use_atomic_add,
+        use_fp32_reduce, is_zp_float);
   } else {
     TORCH_CHECK(false, "gpt_marlin_gemm only supports bfloat16 and float16");
   }
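
In short: when use_atomic_add is set and a slice spans more than one threadblock, each block atomicAdds its partial tile (as scalar_t2 pairs) directly into C and the lock-protected global-reduce pass is skipped, which is why the host code switches C from torch::empty to torch::zeros. A rough PyTorch sketch of that accumulation idea, with illustrative shapes and fp32 in place of half2 (this is a conceptual model, not the kernel):

```python
# Conceptual sketch only: per-k-slice partial GEMMs accumulated into a
# zero-initialized output, standing in for the kernel's atomicAdd path.
# Shapes and the slice count are illustrative.
import torch

m, n, k, num_slices = 16, 1024, 4096, 4
a = torch.randn(m, k)
b = torch.randn(k, n)

# Lock-based path (reference): one fully reduced result is written at the end.
reference = a @ b

# Atomic-add path: every k-slice adds its partial product into C, so C must
# start at zero (hence torch::zeros in gptq_marlin_gemm when use_atomic_add).
c = torch.zeros(m, n)
slice_k = k // num_slices
for start in range(0, k, slice_k):
    c += a[:, start:start + slice_k] @ b[start:start + slice_k, :]

torch.testing.assert_close(c, reference, rtol=1e-3, atol=1e-3)
```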

csrc/torch_bindings.cpp

Lines changed: 2 additions & 1 deletion
@@ -272,7 +272,8 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
       "Tensor b_zeros, Tensor g_idx, Tensor perm, Tensor workspace, "
       "int b_q_type, "
       "SymInt size_m, SymInt size_n, SymInt size_k, bool is_k_full, "
-      "bool has_zp, bool use_fp32_reduce, bool is_zp_float) -> Tensor",
+      "bool has_zp, bool use_atomic_add, bool use_fp32_reduce, "
+      "bool is_zp_float) -> Tensor",
       {stride_tag});
   // conditionally compiled so impl registration is in source file

tests/kernels/test_marlin_gemm.py

Lines changed: 10 additions & 6 deletions
@@ -34,6 +34,7 @@
 
 ACT_ORDER_OPTS = [False, True]
 K_FULL_OPTS = [False, True]
+USE_ATOMIC_ADD_OPTS = [False, True]
 USE_FP32_REDUCE_OPTS = [False, True]
 
 MARLIN_K_CHUNKS = [128]
@@ -194,6 +195,7 @@ def test_awq_marlin_repack(k_chunk, n_chunk, quant_type, group_size,
 @pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
 @pytest.mark.parametrize("act_order", ACT_ORDER_OPTS)
 @pytest.mark.parametrize("is_k_full", K_FULL_OPTS)
+@pytest.mark.parametrize("use_atomic_add", USE_ATOMIC_ADD_OPTS)
 @pytest.mark.parametrize("use_fp32_reduce", USE_FP32_REDUCE_OPTS)
 def test_gptq_marlin_gemm(
     k_chunk,
@@ -203,6 +205,7 @@ def test_gptq_marlin_gemm(
     mnk_factors,
     act_order,
     is_k_full,
+    use_atomic_add,
     use_fp32_reduce,
 ):
     m_factor, n_factor, k_factor = mnk_factors
@@ -228,12 +231,12 @@ def test_gptq_marlin_gemm(
     workspace = MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N,
                                 GPTQ_MARLIN_MAX_PARALLEL)
 
-    opcheck(
-        torch.ops._C.gptq_marlin_gemm,
-        (a_input, marlin_q_w, marlin_s, marlin_zp, g_idx, sort_indices,
-         workspace.scratch, quant_type.id, a_input.shape[0], b_weight.shape[1],
-         a_input.shape[1], is_k_full, False, use_fp32_reduce, False),
-        test_utils=DEFAULT_OPCHECK_TEST_UTILS)
+    opcheck(torch.ops._C.gptq_marlin_gemm,
+            (a_input, marlin_q_w, marlin_s, marlin_zp, g_idx, sort_indices,
+             workspace.scratch, quant_type.id, a_input.shape[0],
+             b_weight.shape[1], a_input.shape[1], is_k_full, False,
+             use_atomic_add, use_fp32_reduce, False),
+            test_utils=DEFAULT_OPCHECK_TEST_UTILS)
 
     output = ops.gptq_marlin_gemm(
         a_input,
@@ -249,6 +252,7 @@ def test_gptq_marlin_gemm(
         a_input.shape[1],
         is_k_full=is_k_full,
         has_zp=False,
+        use_atomic_add=use_atomic_add,
         use_fp32_reduce=use_fp32_reduce,
         is_zp_float=False,
     )

vllm/_custom_ops.py

Lines changed: 4 additions & 1 deletion
@@ -301,6 +301,7 @@ def _gptq_marlin_gemm_fake(a: torch.Tensor,
                            size_k: torch.SymInt,
                            is_k_full: bool,
                            has_zp: bool = False,
+                           use_atomic_add: bool = False,
                            use_fp32_reduce: bool = False,
                            is_zp_float: bool = False) -> torch.Tensor:
     return torch.empty((size_m, size_n), device=a.device, dtype=a.dtype)
@@ -713,12 +714,14 @@ def gptq_marlin_gemm(a: torch.Tensor,
                      size_k: int,
                      is_k_full: bool,
                      has_zp: bool = False,
+                     use_atomic_add: bool = False,
                      use_fp32_reduce: bool = False,
                      is_zp_float: bool = False) -> torch.Tensor:
     return torch.ops._C.gptq_marlin_gemm(a, b_q_weight, b_scales, b_zeros,
                                          g_idx, perm, workspace, b_q_type.id,
                                          size_m, size_n, size_k, is_k_full,
-                                         has_zp, use_fp32_reduce, is_zp_float)
+                                         has_zp, use_atomic_add,
+                                         use_fp32_reduce, is_zp_float)
 
 
 # fp8 marlin

vllm/envs.py

Lines changed: 5 additions & 0 deletions
@@ -95,6 +95,7 @@
     VLLM_DP_SIZE: int = 1
     VLLM_DP_MASTER_IP: str = ""
     VLLM_DP_MASTER_PORT: int = 0
+    VLLM_MARLIN_USE_ATOMIC_ADD: bool = False
 
 
 def get_default_cache_root():
@@ -630,6 +631,10 @@ def maybe_convert_int(value: Optional[str]) -> Optional[int]:
     # Whether to use S3 path for model loading in CI via RunAI Streamer
     "VLLM_CI_USE_S3":
     lambda: os.environ.get("VLLM_CI_USE_S3", "0") == "1",
+
+    # Whether to use atomicAdd reduce in gptq/awq marlin kernel.
+    "VLLM_MARLIN_USE_ATOMIC_ADD":
+    lambda: os.environ.get("VLLM_MARLIN_USE_ATOMIC_ADD", "0") == "1",
 }
 
 # end-env-vars-definition
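
The new flag keeps atomicAdd reduction opt-in. A minimal sketch of enabling it, assuming a standard vLLM install; the model name is a placeholder, and setting the variable before importing vLLM simply guarantees it is visible when the Marlin layer picks its reduction strategy:

```python
# Sketch: opting in to the atomicAdd reduce path via the new env var.
import os

os.environ["VLLM_MARLIN_USE_ATOMIC_ADD"] = "1"

from vllm import LLM

# Hypothetical GPTQ/AWQ-quantized model id used only for illustration.
llm = LLM(model="some-org/some-gptq-marlin-model")
print(llm.generate("Hello")[0].outputs[0].text)
```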

vllm/model_executor/layers/quantization/utils/marlin_utils.py

Lines changed: 32 additions & 0 deletions
@@ -5,6 +5,7 @@
 import numpy
 import torch
 
+import vllm.envs as envs
 from vllm import _custom_ops as ops
 from vllm.model_executor.layers.linear import LinearBase
 from vllm.platforms import current_platform
@@ -290,6 +291,23 @@ def moe_awq_to_marlin_zero_points(q_zp_packed: torch.Tensor, size_k: int,
     return output
 
 
+def should_use_atomic_add_reduce(m: int, n: int, k: int, device: torch.device,
+                                 dtype: torch.dtype) -> bool:
+    # disable atomicAdd reduce by default,
+    # one can enable it with VLLM_MARLIN_USE_ATOMIC_ADD=1
+    if not envs.VLLM_MARLIN_USE_ATOMIC_ADD or device.type != "cuda":
+        return False
+
+    # sm8x doesn't support atomicAdd + bfloat16 natively
+    device_capability = torch.cuda.get_device_capability(device)
+    if device_capability[0] < 9 and dtype == torch.bfloat16:
+        return False
+
+    # the performance of atomicAdd is better than global reduce
+    # only when m*n is small and k is large
+    return max(m, 64) * n < 64 * 2048 and k >= 2048
+
+
 def apply_gptq_marlin_linear(
     input: torch.Tensor,
     weight: torch.Tensor,
@@ -307,6 +325,12 @@ def apply_gptq_marlin_linear(
     reshaped_x = input.reshape(-1, input.shape[-1])
     out_shape = input.shape[:-1] + (output_size_per_partition, )
 
+    use_atomic_add = should_use_atomic_add_reduce(m=reshaped_x.size(0),
+                                                  n=output_size_per_partition,
+                                                  k=reshaped_x.size(1),
+                                                  device=input.device,
+                                                  dtype=input.dtype)
+
     output = ops.gptq_marlin_gemm(reshaped_x,
                                   weight,
                                   weight_scale,
@@ -320,6 +344,7 @@ def apply_gptq_marlin_linear(
                                   size_k=input_size_per_partition,
                                   is_k_full=is_k_full,
                                   has_zp=False,
+                                  use_atomic_add=use_atomic_add,
                                   use_fp32_reduce=use_fp32_reduce,
                                   is_zp_float=False)
 
@@ -345,6 +370,12 @@ def apply_awq_marlin_linear(
     reshaped_x = input.reshape(-1, input.shape[-1])
     out_shape = input.shape[:-1] + (output_size_per_partition, )
 
+    use_atomic_add = should_use_atomic_add_reduce(m=reshaped_x.size(0),
+                                                  n=output_size_per_partition,
+                                                  k=reshaped_x.size(1),
+                                                  device=input.device,
+                                                  dtype=input.dtype)
+
     output = ops.gptq_marlin_gemm(reshaped_x,
                                   weight,
                                   weight_scale,
@@ -358,6 +389,7 @@ def apply_awq_marlin_linear(
                                   size_k=input_size_per_partition,
                                   is_k_full=True,
                                   has_zp=True,
+                                  use_atomic_add=use_atomic_add,
                                   use_fp32_reduce=use_fp32_reduce,
                                   is_zp_float=False)
 
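
The final shape check in should_use_atomic_add_reduce is easy to sanity-check by hand. Below is a standalone restatement of just that condition (the env-var, CUDA-device, and sm8x/bfloat16 guards above still apply first); the example shapes are illustrative:

```python
# Only the shape part of the heuristic from should_use_atomic_add_reduce:
# per the kernel comment, atomicAdd beats the lock-based global reduce only
# when m*n is small and k is large.
def atomic_add_shape_ok(m: int, n: int, k: int) -> bool:
    return max(m, 64) * n < 64 * 2048 and k >= 2048

# Decode-time GEMM with a narrow output: atomicAdd path is allowed.
print(atomic_add_shape_ok(m=1, n=1024, k=4096))   # True  (64*1024 < 64*2048)

# Wide output projection: falls back to the lock-based reduce.
print(atomic_add_shape_ok(m=1, n=4096, k=4096))   # False (64*4096 >= 64*2048)

# Small k: the extra reduce pass is cheap anyway, so keep the default path.
print(atomic_add_shape_ok(m=1, n=1024, k=1024))   # False (k < 2048)
```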
