
Commit 67ecf0c

enable mm allreduce test
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
1 parent af04ee9 commit 67ecf0c

File tree: 2 files changed (+39 −1 lines)


tests/e2e/multicard/test_external_launcher.py

Lines changed: 37 additions & 1 deletion

@@ -27,12 +27,13 @@
 from unittest.mock import patch
 
 import pytest
+import torch_npu
 
 MODELS = ["Qwen/Qwen3-0.6B"]
+DEVICE_NAME = torch_npu.npu.get_device_name(0)[:10]
 
 
 @pytest.mark.parametrize("model", MODELS)
-@patch.dict(os.environ, {"ASCEND_RT_VISIBLE_DEVICES": "0,1,2,3"})
 def test_external_launcher(model):
     script = Path(
         __file__
@@ -71,3 +72,38 @@ def test_external_launcher(model):
     assert "TP RANKS: [1]" in output
     assert "Generated text:" in output
     assert proc.returncode == 0
+
+
+@pytest.mark.skipif(
+    DEVICE_NAME != "Ascend910B",
+    reason="This test is only for Ascend910B devices.",
+)
+@pytest.mark.parametrize("model", MODELS)
+@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE": "1"})
+def test_mm_allreduce(model):
+    script = Path(
+        __file__
+    ).parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
+    env = os.environ.copy()
+    cmd = [
+        sys.executable,
+        str(script),
+        "--model",
+        model,
+        "--trust-remote-code",
+    ]
+
+    print(f"Running subprocess: {' '.join(cmd)}")
+    proc = subprocess.run(
+        cmd,
+        env=env,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        timeout=600,
+    )
+
+    output = proc.stdout.decode()
+    print(output)
+
+    assert "Generated text:" in output
+    assert proc.returncode == 0
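
Two details of the new test are worth calling out. The skip gate compares the first ten characters of torch_npu.npu.get_device_name(0) against "Ascend910B", so the test only runs on 910B-series SoCs. And the VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE flag reaches the child process because patch.dict mutates os.environ for the duration of the test, and os.environ.copy() snapshots that mutated mapping into the subprocess environment. A minimal, self-contained sketch of the propagation mechanism (the flag name is taken from the diff; everything else is illustrative):

    # Sketch: why the patched flag is visible in the subprocess. patch.dict
    # temporarily inserts the key into os.environ; the copy taken inside the
    # decorated function therefore carries the flag into the child process.
    import os
    import subprocess
    import sys
    from unittest.mock import patch

    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE": "1"})
    def run_child():
        env = os.environ.copy()  # includes the patched flag
        out = subprocess.run(
            [sys.executable, "-c",
             "import os; print(os.environ['VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE'])"],
            env=env,
            stdout=subprocess.PIPE,
        )
        assert out.stdout.decode().strip() == "1"

    run_child()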

vllm_ascend/patch/worker/patch_common/patch_linear.py

Lines changed: 2 additions & 0 deletions

@@ -25,6 +25,7 @@
 from vllm.distributed import (get_tensor_model_parallel_rank,
                               split_tensor_along_last_dim)
 from vllm.distributed.parallel_state import get_tp_group
+from vllm.logger import logger
 from vllm.model_executor.layers.linear import RowParallelLinear
 
 from vllm_ascend import envs
@@ -142,4 +143,5 @@ def calc_output(self, input_parallel: torch.Tensor) -> torch.Tensor:
 
 
 if envs.VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE:
+    logger.info("AscendRowParallelLinear: Matmul all-reduce is enabled. ")
     vllm.model_executor.layers.linear.RowParallelLinear = AscendRowParallelLinear
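
The new log line fires once at import time, when the module-level guard swaps vLLM's RowParallelLinear for the Ascend variant. The point of the swap is to replace the separate matmul followed by a tensor-parallel all-reduce with a single fused Ascend kernel in calc_output(). A sketch of that idea, not the commit's implementation: it assumes torch_npu's npu_mm_all_reduce_base fused op, and self.hcomm_info (an HCCL communicator handle for the TP group) is a hypothetical attribute here, since neither appears in this hunk:

    # Sketch only: the fused matmul + all-reduce that calc_output() is built
    # around. npu_mm_all_reduce_base is torch_npu's fused kernel for Ascend;
    # hcomm_info is a hypothetical attribute standing in for the HCCL
    # communicator derived from the tensor-parallel process group.
    import torch
    import torch_npu

    def calc_output(self, input_parallel: torch.Tensor) -> torch.Tensor:
        # One kernel launch instead of torch.matmul(...) followed by a
        # separate tensor-parallel all-reduce.
        return torch_npu.npu_mm_all_reduce_base(
            input_parallel,    # this rank's shard of the activations
            self.weight.t(),   # RowParallelLinear stores weight as (output, input)
            self.hcomm_info,   # HCCL communication handle (hypothetical here)
            reduce_op="sum",
        )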
