Skip to content

Commit 1a521f9

Browse files
committed
Init codecov
Signed-off-by: Yikun Jiang <yikunkero@gmail.com>
1 parent 69b817e commit 1a521f9

File tree

2 files changed

+60
-14
lines changed

2 files changed

+60
-14
lines changed

.github/workflows/vllm_ascend_test.yaml

Lines changed: 33 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -162,7 +162,16 @@ jobs:
162162
TORCH_DEVICE_BACKEND_AUTOLOAD: 0
163163
run: |
164164
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
165-
pytest -sv tests/ut
165+
pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut
166+
167+
- name: Upload coverage to Codecov
168+
uses: codecov/codecov-action@v5
169+
env:
170+
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
171+
with:
172+
flags: unittests
173+
name: vllm-ascend
174+
verbose: true
166175

167176
e2e:
168177
needs: [lint]
@@ -224,19 +233,26 @@ jobs:
224233
VLLM_WORKER_MULTIPROC_METHOD: spawn
225234
VLLM_USE_MODELSCOPE: True
226235
run: |
227-
pytest -sv tests/e2e/singlecard/test_offline_inference.py
228236
# TODO: switch hf to modelscope
229237
VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
230-
pytest -sv tests/e2e/singlecard/test_ilama_lora.py
238+
pytest -sv --cov --cov-report=xml:lora-coverage.xml tests/e2e/singlecard/test_ilama_lora.py
231239
# TODO(sss): guided decoding doesn't work, fix it later
232240
# pytest -sv tests/e2e/singlecard/test_guided_decoding.py
233-
pytest -sv tests/e2e/singlecard/test_camem.py
234-
pytest -sv tests/e2e/singlecard/ \
235-
--ignore=tests/e2e/singlecard/test_offline_inference.py \
241+
pytest -sv --cov --cov-report=xml:camem-coverage.xml tests/e2e/singlecard/test_camem.py
242+
pytest -sv --cov --cov-report=xml:single-coverage.xml tests/e2e/singlecard/ \
236243
--ignore=tests/e2e/singlecard/test_ilama_lora.py \
237244
--ignore=tests/e2e/singlecard/test_guided_decoding.py \
238245
--ignore=tests/e2e/singlecard/test_camem.py
239246
247+
- name: Upload coverage to Codecov
248+
uses: codecov/codecov-action@v5
249+
env:
250+
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
251+
with:
252+
flags: integration
253+
name: vllm-ascend
254+
verbose: true
255+
240256
- name: Run e2e test on V0 engine
241257
if: ${{ github.event_name == 'schedule' }}
242258
env:
@@ -323,14 +339,17 @@ jobs:
323339
run: |
324340
# TODO: switch hf to modelscope
325341
VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
326-
pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
327-
# Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py will raise error.
328-
# To avoid oom, we need to run the test in a single process.
329-
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
330-
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek
331-
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_topk
332-
pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W8A8
333-
pytest -sv tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py --ignore=tests/e2e/multicard/test_offline_inference_distributed.py
342+
pytest -sv --cov --cov-report=xml:lora-tp-coverage.xml tests/e2e/multicard/test_ilama_lora_tp2.py
343+
pytest -sv --cov --cov-report=xml:multi-coverage.xml tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py
344+
345+
- name: Upload coverage to Codecov
346+
uses: codecov/codecov-action@v5
347+
env:
348+
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
349+
with:
350+
flags: integration
351+
name: vllm-ascend
352+
verbose: true
334353

335354
- name: Run vllm-project/vllm-ascend test on V0 engine
336355
if: ${{ github.event_name == 'schedule' }}

codecov.yml

Lines changed: 27 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,27 @@
1+
#
2+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
3+
#
4+
# Licensed under the Apache License, Version 2.0 (the "License");
5+
# you may not use this file except in compliance with the License.
6+
# You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
# This file is a part of the vllm-ascend project.
16+
#
17+
coverage:
18+
status:
19+
# new code must be fully tested
20+
patch:
21+
default:
22+
target: 100%
23+
# non-voting
24+
informational: true
25+
flags:
26+
- "unittests"
27+
- "integration"

0 commit comments

Comments (0)