
Commit 9434f24

[TEST] Add initial multi-modal cases for nightly test and deepseek-r1 tests (#3631)

### What this PR does / why we need it?
This PR adds the initial multi-modal model to the nightly test, including 3 cases for Qwen2.5-VL-7B acc/perf testing on A3 that need to run daily. It also includes 8 cases for deepseek-r1-0528-w8a8 func, acc, and perf tests.

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
By running the tests.

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

---------

Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
1 parent 427b17e commit 9434f24
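The nightly suites referenced by this commit are ordinary pytest modules, so the "running the tests" step above can be reproduced outside CI. A minimal sketch of a local run, assuming an Ascend A3 host with the model weights and the ais_bench tooling already set up (that environment is provided by the CI runners and is not part of this commit):

    # Hypothetical local driver, not part of the commit: runs the two new
    # nightly suites with pytest. It assumes the same Ascend A3 environment,
    # model weights, and ais_bench tooling that the CI runners provide.
    import pytest

    if __name__ == "__main__":
        raise SystemExit(
            pytest.main([
                "tests/e2e/nightly/models/test_qwen2_5_vl_7b.py",
                "tests/e2e/nightly/models/test_deepseek_r1_0528_w8a8.py",
                "-s",  # stream server and benchmark output to the console
            ]))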

File tree

4 files changed: +192 -25 lines changed

- .github/workflows/vllm_ascend_test_nightly.yaml
- tests/e2e/nightly/models/test_deepseek_r1_0528_w8a8.py
- tests/e2e/nightly/models/test_qwen3_32b_int8.py
- tools/aisbench.py

.github/workflows/vllm_ascend_test_nightly.yaml

Lines changed: 23 additions & 9 deletions
@@ -26,7 +26,7 @@ on:
     branches:
       - 'main'
       - '*-dev'
-    types: [labeled]
+    types: [labeled,opened,synchronize]
 
 # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
 # declared as "shell: bash -el {0}" on steps that need to be properly activated.
@@ -80,10 +80,7 @@ jobs:
     if: contains(github.event.pull_request.labels.*.name, 'run-nightly')
     strategy:
       matrix:
-        # should add A3 chip runner when available
         os: [ linux-aarch64-a3-16 ]
-    # Note (yikun): If CI resource are limited we can split job into two chain jobs
-    # only trigger e2e test after lint passed and the change is e2e related with pull request.
     uses: ./.github/workflows/_e2e_nightly.yaml
     with:
       vllm: v0.11.0
@@ -94,15 +91,32 @@
     if: contains(github.event.pull_request.labels.*.name, 'run-nightly')
     strategy:
       matrix:
-        # should add A3 chip runner when available
         os: [ linux-aarch64-a3-16 ]
-    # Note (yikun): If CI resource are limited we can split job into two chain jobs
-    # only trigger e2e test after lint passed and the change is e2e related with pull request.
     uses: ./.github/workflows/_e2e_nightly.yaml
     with:
       vllm: v0.11.0
       runner: ${{ matrix.os }}
       image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
       tests: tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
-
-
+  qwen2-5-vl-7b:
+    if: contains(github.event.pull_request.labels.*.name, 'run-nightly')
+    strategy:
+      matrix:
+        os: [ linux-aarch64-a3-4 ]
+    uses: ./.github/workflows/_e2e_nightly.yaml
+    with:
+      vllm: v0.11.0
+      runner: ${{ matrix.os }}
+      image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
+      tests: tests/e2e/nightly/models/test_qwen2_5_vl_7b.py
+  deepseek-r1-0528-w8a8:
+    if: contains(github.event.pull_request.labels.*.name, 'run-nightly')
+    strategy:
+      matrix:
+        os: [ linux-aarch64-a3-16 ]
+    uses: ./.github/workflows/_e2e_nightly.yaml
+    with:
+      vllm: v0.11.0
+      runner: ${{ matrix.os }}
+      image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
+      tests: tests/e2e/nightly/models/test_deepseek_r1_0528_w8a8.py
tests/e2e/nightly/models/test_deepseek_r1_0528_w8a8.py

Lines changed: 136 additions & 0 deletions (new file)

# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import json
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/DeepSeek-R1-0528-W8A8",
]

MODES = [
    "torchair",
    "single",
    "aclgraph",
    "no_chunkprefill",
]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 400,
    "max_out_len": 1500,
    "batch_size": 1000,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
async def test_models(model: str, mode: str) -> None:
    port = get_open_port()
    env_dict = {
        "OMP_NUM_THREADS": "10",
        "OMP_PROC_BIND": "false",
        "HCCL_BUFFSIZE": "1024",
        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True"
    }
    speculative_config = {
        "num_speculative_tokens": 1,
        "method": "deepseek_mtp"
    }
    additional_config = {
        "ascend_scheduler_config": {
            "enabled": False
        },
        "torchair_graph_config": {
            "enabled": True,
            "enable_multistream_moe": False,
            "enable_multistream_mla": True,
            "graph_batch_sizes": [16],
            "use_cached_graph": True
        },
        "chunked_prefill_for_mla": True,
        "enable_weight_nz_layout": True
    }
    server_args = [
        "--quantization", "ascend", "--data-parallel-size", "2",
        "--tensor-parallel-size", "8", "--enable-expert-parallel", "--port",
        str(port), "--seed", "1024", "--max-model-len", "36864",
        "--max-num-batched-tokens", "4096", "--max-num-seqs", "16",
        "--trust-remote-code", "--gpu-memory-utilization", "0.9",
        "--speculative-config",
        json.dumps(speculative_config)
    ]
    if mode == "single":
        server_args.append("--enforce-eager")
        additional_config["torchair_graph_config"] = {"enabled": False}
    if mode == "aclgraph":
        additional_config["torchair_graph_config"] = {"enabled": False}
    if mode == "no_chunkprefill":
        additional_config["ascend_scheduler_config"] = {"enabled": True}
        i = server_args.index("--max-num-batched-tokens") + 1
        server_args[i] = "36864"
    server_args.extend(["--additional-config", json.dumps(additional_config)])
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        print(choices)
        if mode in ["single", "no_chunkprefill"]:
            return
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)
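For quick reference while reading the mode branching in test_models above, a small summary sketch (not part of the committed file); the dictionary keys are descriptive labels for this sketch only, not actual vLLM or vllm-ascend options:

    # Illustrative summary of how each entry of MODES alters the server setup
    # that test_models builds; labels here are for explanation only.
    def effective_mode_settings(mode: str) -> dict:
        return {
            # torchair graph mode stays enabled for "torchair" and "no_chunkprefill"
            "torchair_graph_enabled": mode in ("torchair", "no_chunkprefill"),
            # only "single" adds --enforce-eager
            "enforce_eager": mode == "single",
            # the ascend scheduler is switched on only for "no_chunkprefill"
            "ascend_scheduler_enabled": mode == "no_chunkprefill",
            # "no_chunkprefill" also raises --max-num-batched-tokens to 36864
            "max_num_batched_tokens": 36864 if mode == "no_chunkprefill" else 4096,
            # aisbench acc/perf cases run only for "torchair" and "aclgraph"
            "runs_aisbench": mode in ("torchair", "aclgraph"),
        }

    for m in ("torchair", "single", "aclgraph", "no_chunkprefill"):
        print(m, effective_mode_settings(m))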

tests/e2e/nightly/models/test_qwen3_32b_int8.py

Lines changed: 9 additions & 9 deletions
@@ -51,6 +51,15 @@
 performance_batch_size = batch_size_dict.get(VLLM_CI_RUNNER, 1)
 
 aisbench_cases = [{
+    "case_type": "accuracy",
+    "dataset_path": "vllm-ascend/aime2024",
+    "request_conf": "vllm_api_general_chat",
+    "dataset_conf": "aime2024/aime2024_gen_0_shot_chat_prompt",
+    "max_out_len": 32768,
+    "batch_size": 32,
+    "baseline": 83.33,
+    "threshold": 17
+}, {
     "case_type": "performance",
     "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
     "request_conf": "vllm_api_stream_chat",
@@ -60,15 +69,6 @@
     "batch_size": performance_batch_size,
     "baseline": 1,
     "threshold": 0.97
-}, {
-    "case_type": "accuracy",
-    "dataset_path": "vllm-ascend/aime2024",
-    "request_conf": "vllm_api_general_chat",
-    "dataset_conf": "aime2024/aime2024_gen_0_shot_chat_prompt",
-    "max_out_len": 32768,
-    "batch_size": 32,
-    "baseline": 83.33,
-    "threshold": 17
 }]
 
 
tools/aisbench.py

Lines changed: 24 additions & 7 deletions
@@ -43,13 +43,12 @@ def _run_aisbench_task(self):
         if self.task_type == "accuracy":
             aisbench_cmd = [
                 'ais_bench', '--models', f'{self.request_conf}_custom',
-                '--datasets', f'{dataset_conf}', '--debug'
+                '--datasets', f'{dataset_conf}'
             ]
         if self.task_type == "performance":
             aisbench_cmd = [
                 'ais_bench', '--models', f'{self.request_conf}_custom',
-                '--datasets', f'{dataset_conf}_custom', '--debug', '--mode',
-                'perf'
+                '--datasets', f'{dataset_conf}_custom', '--mode', 'perf'
             ]
         if self.num_prompts:
             aisbench_cmd.extend(['--num-prompts', str(self.num_prompts)])
@@ -64,20 +63,25 @@ def __init__(self,
                  port: int,
                  aisbench_config: dict,
                  verify=True):
-        self.result_line = None
         self.dataset_path = snapshot_download(aisbench_config["dataset_path"],
                                               repo_type='dataset')
+        self.model = model
+        self.model_path = snapshot_download(model)
+        self.port = port
         self.task_type = aisbench_config["case_type"]
         self.request_conf = aisbench_config["request_conf"]
         self.dataset_conf = aisbench_config.get("dataset_conf")
         self.num_prompts = aisbench_config.get("num_prompts")
         self.max_out_len = aisbench_config["max_out_len"]
         self.batch_size = aisbench_config["batch_size"]
         self.request_rate = aisbench_config.get("request_rate", 0)
-        self.model = model
-        self.model_path = snapshot_download(model)
-        self.port = port
+        self.temperature = aisbench_config.get("temperature")
+        self.top_k = aisbench_config.get("top_k")
+        self.top_p = aisbench_config.get("top_p")
+        self.seed = aisbench_config.get("seed")
+        self.repetition_penalty = aisbench_config.get("repetition_penalty")
         self.exp_folder = None
+        self.result_line = None
         self._init_dataset_conf()
         self._init_request_conf()
         self._run_aisbench_task()
@@ -138,6 +142,19 @@ def _init_request_conf(self):
         content = re.sub(
             r"temperature.*",
             "temperature = 0.6,\n    ignore_eos = False,", content)
+        if self.temperature:
+            content = re.sub(r"temperature.*",
+                             f"temperature = {self.temperature}", content)
+        if self.top_p:
+            content = re.sub(r"#?top_p.*", f"top_p = {self.top_p}", content)
+        if self.top_k:
+            content = re.sub(r"#top_k.*", f"top_k = {self.top_k}", content)
+        if self.seed:
+            content = re.sub(r"#seed.*", f"seed = {self.seed}", content)
+        if self.repetition_penalty:
+            content = re.sub(
+                r"#repetition_penalty.*",
+                f"repetition_penalty = {self.repetition_penalty}", content)
         conf_path_new = os.path.join(REQUEST_CONF_DIR,
                                      f'{self.request_conf}_custom.py')
         with open(conf_path_new, 'w', encoding='utf-8') as f:
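The optional keys now read in __init__ (temperature, top_k, top_p, seed, repetition_penalty) let a single aisbench case override sampling parameters in the generated request config. A minimal sketch of such a case, reusing the gsm8k accuracy settings from the new DeepSeek test; the override values themselves are illustrative and do not come from this commit:

    # Sketch only: an aisbench case using the new optional sampling-override
    # keys. Base fields are copied from the gsm8k accuracy case added above;
    # the sampling values are made up for illustration.
    sampling_override_case = {
        "case_type": "accuracy",
        "dataset_path": "vllm-ascend/gsm8k-lite",
        "request_conf": "vllm_api_general_chat",
        "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
        "max_out_len": 32768,
        "batch_size": 32,
        "baseline": 95,
        "threshold": 5,
        # New optional keys: when set, _init_request_conf() rewrites the
        # matching (possibly commented-out) lines in the request config.
        "temperature": 0.6,
        "top_p": 0.95,
        "top_k": 40,
        "seed": 1024,
        "repetition_penalty": 1.05,
    }

    # Passing it to run_aisbench_cases(model, port, [sampling_override_case])
    # would then generate a custom request config with these values before
    # launching ais_bench against the running server.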
