@@ -42,7 +42,7 @@ concurrency:
4242
4343jobs :
4444 test-singlenpu :
45- name : vLLM Ascend test (self-host )
45+ name : vLLM Ascend test (single-npu)
4646 runs-on : linux-arm64-npu-1 # actionlint-ignore: runner-label
4747 container :
4848 image : quay.io/ascend/cann:8.0.0-910b-ubuntu22.04-py3.10
7272 uses : actions/checkout@v4
7373 with :
7474 repository : vllm-project/vllm
75+ fetch-depth : 0
7576 path : ./vllm-empty
7677
7778 - name : Install vllm-project/vllm from source
@@ -99,7 +100,28 @@ jobs:
99100
100101 pip install /root/.cache/pta/torch_npu-2.5.1.dev20250320-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
101102
102- - name : Run vllm-project/vllm-ascend test for V0 Engine
103+ - name : Run vllm-project/vllm-ascend test
104+ env :
105+ VLLM_USE_V1 : 0
106+ HF_ENDPOINT : https://hf-mirror.com
107+ run : |
108+ VLLM_USE_V1=0 pytest -sv -m 'not multinpu' tests
109+
110+ - name : Run vllm-project/vllm test for V0 Engine
111+ env :
112+ VLLM_USE_V1 : 0
113+ PYTORCH_NPU_ALLOC_CONF : max_split_size_mb:256
114+ HF_ENDPOINT : https://hf-mirror.com
115+ run : |
116+ pytest -sv
117+
118+ - name : Checkout to vllm 0.8.3
119+ working-directory : ./vllm-empty
120+ run : |
121+ git checkout v0.8.3
122+ VLLM_TARGET_DEVICE=empty pip install -e .
123+
124+ - name : Run vllm-project/vllm-ascend test
103125 env :
104126 VLLM_USE_V1 : 0
105127 HF_ENDPOINT : https://hf-mirror.com
@@ -114,8 +136,8 @@ jobs:
114136 run : |
115137 pytest -sv
116138
117- test2 :
118- name : test on multiple npu runner
139+ test-multinpu :
139+ name : vLLM Ascend test (multi-npu)
119141 runs-on : linux-arm64-npu-4
120142 container :
121143 image : ascendai/cann:8.0.0-910b-ubuntu22.04-py3.10
@@ -162,7 +184,6 @@ jobs:
162184
163185 - name : Install vllm-project/vllm-ascend
164186 run : |
165- pip uninstall -y numpy
166187 pip install -r requirements-dev.txt
167188 pip install -e .
168189
@@ -186,3 +207,16 @@ jobs:
186207 PYTORCH_NPU_ALLOC_CONF : max_split_size_mb:256
187208 run : |
188209 pytest -sv -m multinpu tests/
210+
211+ - name : Checkout to vllm 0.8.3
212+ working-directory : ./vllm-empty
213+ run : |
214+ git checkout v0.8.3
215+ VLLM_TARGET_DEVICE=empty pip install -e .
216+
217+ - name : Run vllm-project/vllm-ascend test
218+ env :
219+ VLLM_USE_V1 : 0
220+ PYTORCH_NPU_ALLOC_CONF : max_split_size_mb:256
221+ run : |
222+ pytest -sv -m multinpu tests/
0 commit comments