@@ -162,7 +162,16 @@ jobs:
           TORCH_DEVICE_BACKEND_AUTOLOAD: 0
         run: |
           export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
-          pytest -sv tests/ut
+          pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: unittests
+          name: vllm-ascend
+          verbose: true

   e2e:
     needs: [lint]
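Both new upload steps tag their reports with a Codecov flag (`unittests` here, `integration` in the e2e jobs below). If the repository does not already define these flags, a minimal `codecov.yml` along the following lines would enable carryforward so a run that refreshes only one flag is not reported as a coverage drop. This is a hedged sketch and an assumption, not part of this diff:

# codecov.yml at the repository root (assumed location, not part of this change)
coverage:
  status:
    project:
      default:
        informational: true   # assumption: report coverage without failing CI
flag_management:
  default_rules:
    carryforward: true        # keep the last upload for flags a run did not refresh
  individual_flags:
    - name: unittests
    - name: integration
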
@@ -224,19 +233,26 @@ jobs:
           VLLM_WORKER_MULTIPROC_METHOD: spawn
           VLLM_USE_MODELSCOPE: True
         run: |
-          pytest -sv tests/e2e/singlecard/test_offline_inference.py
           # TODO: switch hf to modelscope
           VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
-          pytest -sv tests/e2e/singlecard/test_ilama_lora.py
+          pytest -sv --cov --cov-report=xml:lora-coverage.xml tests/e2e/singlecard/test_ilama_lora.py
           # TODO(sss): guided decoding doesn't work, fix it later
           # pytest -sv tests/e2e/singlecard/test_guided_decoding.py
-          pytest -sv tests/e2e/singlecard/test_camem.py
-          pytest -sv tests/e2e/singlecard/ \
-          --ignore=tests/e2e/singlecard/test_offline_inference.py \
+          pytest -sv --cov --cov-report=xml:camem-coverage.xml tests/e2e/singlecard/test_camem.py
+          pytest -sv --cov --cov-report=xml:single-coverage.xml tests/e2e/singlecard/ \
           --ignore=tests/e2e/singlecard/test_ilama_lora.py \
           --ignore=tests/e2e/singlecard/test_guided_decoding.py \
           --ignore=tests/e2e/singlecard/test_camem.py

+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: integration
+          name: vllm-ascend
+          verbose: true
+
       - name: Run e2e test on V0 engine
         if: ${{ github.event_name == 'schedule' }}
         env:
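The singlecard job writes three separate XML reports (lora-coverage.xml, camem-coverage.xml, single-coverage.xml). The Codecov action normally auto-detects coverage XML files in the workspace, but if auto-detection ever misses the custom names, the reports can be listed explicitly through the action's `files` input. A hedged variant of the upload step above, assuming the reports land in the workspace root:

- name: Upload coverage to Codecov
  uses: codecov/codecov-action@v5
  env:
    CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
  with:
    # explicit, comma-separated report list instead of auto-detection
    files: lora-coverage.xml,camem-coverage.xml,single-coverage.xml
    flags: integration
    name: vllm-ascend
    verbose: true
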
@@ -323,14 +339,17 @@ jobs:
         run: |
           # TODO: switch hf to modelscope
           VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
-          pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
-          # Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py will raise error.
-          # To avoid oom, we need to run the test in a single process.
-          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
-          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek
-          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_topk
-          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W8A8
-          pytest -sv tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py --ignore=tests/e2e/multicard/test_offline_inference_distributed.py
+          pytest -sv --cov --cov-report=xml:lora-tp-coverage.xml tests/e2e/multicard/test_ilama_lora_tp2.py
+          pytest -sv --cov --cov-report=xml:multi-coverage.xml tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: integration
+          name: vllm-ascend
+          verbose: true

       - name: Run vllm-project/vllm-ascend test on V0 engine
         if: ${{ github.event_name == 'schedule' }}
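One caveat with appending the upload after a long test step: GitHub Actions skips later steps once a step fails, so a failing pytest command means any reports already written are never uploaded. A hedged hardening of the step, assuming coverage should be reported even for failed runs and that a Codecov outage should not fail the job:

- name: Upload coverage to Codecov
  if: always()                     # run even when the preceding test step failed
  uses: codecov/codecov-action@v5
  env:
    CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
  with:
    flags: integration
    name: vllm-ascend
    verbose: true
    fail_ci_if_error: false        # don't fail the job on upload errors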