
Commit b659613

Pruning kernel Core Tests
Signed-off-by: Fardin Hoque <kfhfar@amazon.com>
1 parent afc47e4 · commit b659613

File tree: 4 files changed (+5 −18 lines)


tests/kernels/core/test_fused_quant_layernorm.py

Lines changed: 0 additions & 1 deletion

@@ -15,7 +15,6 @@
 # Avoid combinatorial explosion with full Cartesian product
 NUM_TOKENS_HIDDEN_SIZES = [
     *[(1, i) for i in [1, 64, *VEC_HIDDEN_SIZES, 5120, 5137]],
-    *[(83, i) for i in [1, 1033, 2048, 5120]],
     *[(2048, i) for i in [1, 64, *VEC_HIDDEN_SIZES, 5137]],
     *[(4096, i) for i in [1, 64, 5137]],
 ]
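
The list above pairs num_tokens with hidden_size explicitly instead of parametrizing them as two independent axes, so the case count grows additively with the entries listed rather than multiplicatively, and dropping one sub-list (here the 83-token pairs) removes only those cases. A minimal sketch of the difference, with illustrative sizes standing in for VEC_HIDDEN_SIZES:

import itertools

# Two independent parametrize axes would test every combination.
num_tokens = [1, 83, 2048, 4096]
hidden_sizes = [1, 64, 1033, 2048, 5120, 5137]
full_product = list(itertools.product(num_tokens, hidden_sizes))
print(len(full_product))  # 24 cases, growing multiplicatively with each axis

# Hand-picked (num_tokens, hidden_size) pairs, as in NUM_TOKENS_HIDDEN_SIZES,
# grow only with the number of entries actually listed.
paired = [
    *[(1, i) for i in [1, 64, 5120, 5137]],
    *[(2048, i) for i in [1, 64, 5137]],
    *[(4096, i) for i in [1, 64, 5137]],
]
print(len(paired))  # 10 cases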

tests/kernels/core/test_layernorm.py

Lines changed: 2 additions & 14 deletions

@@ -11,19 +11,7 @@
 
 DTYPES = [torch.half, torch.bfloat16, torch.float]
 NUM_TOKENS = [7, 83, 4096]  # Arbitrary values for testing
-HIDDEN_SIZES = [
-    8,
-    768,
-    769,
-    770,
-    771,
-    5120,
-    5124,
-    5125,
-    5126,
-    8192,
-    8199,
-]  # Arbitrary values for testing
+HIDDEN_SIZES = [8, 768, 769, 5120, 5125, 8192]  # Arbitrary values for testing
 ADD_RESIDUAL = [False, True]
 SEEDS = [0]
 CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]

@@ -118,7 +106,7 @@ def test_poly_norm(
 @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
 @pytest.mark.parametrize("add_residual", ADD_RESIDUAL)
 @pytest.mark.parametrize("dtype", DTYPES)
-@pytest.mark.parametrize("quant_scale", [1.0, 0.01, 10.0])
+@pytest.mark.parametrize("quant_scale", [0.01, 1.0, 10.0])
 @pytest.mark.parametrize("seed", SEEDS)
 @pytest.mark.parametrize("device", CUDA_DEVICES)
 @pytest.mark.parametrize("strided_input", [False, True])

tests/kernels/core/test_permute_cols.py

Lines changed: 1 addition & 1 deletion

@@ -9,7 +9,7 @@
 
 
 @pytest.mark.parametrize("shape", [(1, 512), (544, 4096), (67, 8192)])
-@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16])
+@pytest.mark.parametrize("dtype", [torch.bfloat16])
 def test_permute_cols(shape, dtype):
     x = torch.randn(shape, dtype=dtype).cuda()
     perm = torch.randperm(x.shape[1]).to(torch.int).cuda()
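
For context, the behavior this test exercises can be expressed with plain tensor indexing. A minimal CPU reference sketch, assuming permute_cols matches fancy indexing along the column dimension (the helper name below is illustrative, not part of the test file):

import torch

def permute_cols_reference(x: torch.Tensor, perm: torch.Tensor) -> torch.Tensor:
    # Column i of the output is column perm[i] of the input.
    return x[:, perm.long()]

x = torch.randn(4, 8, dtype=torch.bfloat16)
perm = torch.randperm(x.shape[1]).to(torch.int)
out = permute_cols_reference(x, perm)
assert out.shape == x.shape
assert torch.equal(out[:, 0], x[:, int(perm[0])])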

tests/kernels/core/test_pos_encoding.py

Lines changed: 2 additions & 2 deletions

@@ -12,8 +12,8 @@
 from vllm.platforms import current_platform
 
 IS_NEOX_STYLE = [True, False]
-DTYPES = [torch.half, torch.bfloat16, torch.float]
-HEAD_SIZES = [64, 80, 112, 120, 256]
+DTYPES = [torch.bfloat16, torch.float]
+HEAD_SIZES = [64, 80, 120, 256]
 ROTARY_DIMS = [None, 32]  # None means rotary dim == head size
 NUM_HEADS = [17]  # Arbitrary values for testing
 BATCH_SIZES = [5]  # Arbitrary values for testing
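
The ROTARY_DIMS comment above means the test covers both full and partial rotary embeddings. A minimal sketch of NeoX-style partial rotation, assuming only the first rotary_dim channels of each head are rotated and the rest pass through unchanged (a simplification for illustration, not the kernel under test):

import torch

def neox_partial_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor,
                      rotary_dim: int) -> torch.Tensor:
    # x: [..., head_size]; cos/sin: [..., rotary_dim // 2].
    # Rotate the first `rotary_dim` channels, pass the remainder through.
    # rotary_dim == head_size (ROTARY_DIMS entry None) rotates everything.
    rot, rest = x[..., :rotary_dim], x[..., rotary_dim:]
    x1, x2 = rot.chunk(2, dim=-1)
    rotated = torch.cat((x1 * cos - x2 * sin, x2 * cos + x1 * sin), dim=-1)
    return torch.cat((rotated, rest), dim=-1)

head_size, rotary_dim = 64, 32
x = torch.randn(5, 17, head_size)            # [batch, heads, head_size]
angles = torch.randn(5, 1, rotary_dim // 2)  # broadcast over heads
out = neox_partial_rope(x, angles.cos(), angles.sin(), rotary_dim)
assert torch.equal(out[..., rotary_dim:], x[..., rotary_dim:])  # tail untouched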
