Commit 85674c4
[CI] Add accuracy CI for DP, EP and TP
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
1 parent 5177bef commit 85674c4

3 files changed: +268 additions, -72 deletions
Lines changed: 267 additions & 0 deletions
@@ -0,0 +1,267 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
#
import gc
import multiprocessing
import os
import signal
import subprocess
import sys
import time
from multiprocessing import Queue

import lm_eval
import pytest
import requests
import torch

SERVER_HOST = "127.0.0.1"
SERVER_PORT = 8000
HEALTH_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/health"
COMPLETIONS_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/v1/completions"

# Pre-trained model paths on Hugging Face.
# Qwen/Qwen2.5-0.5B-Instruct: accuracy test for DP (served via `vllm serve` below).
# Qwen/Qwen3-30B-A3B: accuracy test for EP and ETP.
# deepseek-ai/DeepSeek-V2-Lite: accuracy test for TP.
MODEL_NAME = ["Qwen/Qwen3-30B-A3B", "deepseek-ai/DeepSeek-V2-Lite"]

# Benchmark configuration mapping each model to its evaluation task.
# All models are currently evaluated on GSM8K (grade-school math reasoning).
TASK = {
    "Qwen/Qwen2.5-0.5B-Instruct": "gsm8k",
    "Qwen/Qwen3-30B-A3B": "gsm8k",
    "deepseek-ai/DeepSeek-V2-Lite": "gsm8k"
}
# Answer validation filter requiring format consistency.
FILTER = {
    "Qwen/Qwen2.5-0.5B-Instruct": "exact_match,strict-match",
    "Qwen/Qwen3-30B-A3B": "exact_match,strict-match",
    "deepseek-ai/DeepSeek-V2-Lite": "exact_match,strict-match"
}
# Tolerance band of ±0.03 applied to the measured accuracy.
RTOL = 0.03
# Baseline accuracy after vLLM optimization.
EXPECTED_VALUE = {
    "Qwen/Qwen2.5-0.5B-Instruct": 0.316,
    "Qwen/Qwen3-30B-A3B": 0.888,
    "deepseek-ai/DeepSeek-V2-Lite": 0.376
}
# Maximum context length configuration for each model.
MAX_MODEL_LEN = {
    "Qwen/Qwen2.5-0.5B-Instruct": 4096,
    "Qwen/Qwen3-30B-A3B": 4096,
    "deepseek-ai/DeepSeek-V2-Lite": 4096
}
# lm_eval backend per model ("vllm" for text-only, "vllm-vlm" for vision-language).
MODEL_TYPE = {
    "Qwen/Qwen2.5-0.5B-Instruct": "vllm",
    "Qwen/Qwen3-30B-A3B": "vllm",
    "deepseek-ai/DeepSeek-V2-Lite": "vllm"
}
# Whether to wrap prompts in a chat-style template.
APPLY_CHAT_TEMPLATE = {
    "Qwen/Qwen2.5-0.5B-Instruct": False,
    "Qwen/Qwen3-30B-A3B": False,
    "deepseek-ai/DeepSeek-V2-Lite": False
}
# Whether few-shot examples are handled as multi-turn dialogues.
FEWSHOT_AS_MULTITURN = {
    "Qwen/Qwen2.5-0.5B-Instruct": False,
    "Qwen/Qwen3-30B-A3B": False,
    "deepseek-ai/DeepSeek-V2-Lite": False
}
# Extra lm_eval model_args appended per model (parallelism and eager-mode settings).
MORE_ARGS = {
    "Qwen/Qwen2.5-0.5B-Instruct":
    None,
    "Qwen/Qwen3-30B-A3B":
    "tensor_parallel_size=4,enable_expert_parallel=True,enforce_eager=True",
    "deepseek-ai/DeepSeek-V2-Lite":
    "tensor_parallel_size=4,trust_remote_code=True,enforce_eager=True"
}
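
To register an additional model in this suite, each of the mappings above needs an entry. A minimal, purely hypothetical sketch (the model name, baseline accuracy and parallelism setting below are placeholders, not part of this commit):

# Hypothetical registration of one more model (placeholder values only).
NEW_MODEL = "Qwen/Qwen2.5-7B-Instruct"           # placeholder model id
MODEL_NAME.append(NEW_MODEL)                     # include it in the parametrized run
TASK[NEW_MODEL] = "gsm8k"
FILTER[NEW_MODEL] = "exact_match,strict-match"
EXPECTED_VALUE[NEW_MODEL] = 0.0                  # fill in the measured baseline
MAX_MODEL_LEN[NEW_MODEL] = 4096
MODEL_TYPE[NEW_MODEL] = "vllm"
APPLY_CHAT_TEMPLATE[NEW_MODEL] = False
FEWSHOT_AS_MULTITURN[NEW_MODEL] = False
MORE_ARGS[NEW_MODEL] = "tensor_parallel_size=2"  # placeholder parallelism setting
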
multiprocessing.set_start_method("spawn", force=True)


def run_test(queue, model, max_model_len, model_type, more_args):
    try:
        if model_type == "vllm-vlm":
            model_args = (f"pretrained={model},max_model_len={max_model_len},"
                          "dtype=auto,max_images=2")
        else:
            model_args = (f"pretrained={model},max_model_len={max_model_len},"
                          "dtype=auto")
        if more_args is not None:
            model_args = f"{model_args},{more_args}"
        results = lm_eval.simple_evaluate(
            model=model_type,
            model_args=model_args,
            tasks=TASK[model],
            batch_size="auto",
            apply_chat_template=APPLY_CHAT_TEMPLATE[model],
            fewshot_as_multiturn=FEWSHOT_AS_MULTITURN[model],
        )
        result = results["results"][TASK[model]][FILTER[model]]
        print("result:", result)
        queue.put(result)
    except Exception as e:
        error_msg = f"{type(e).__name__}: {str(e)}"
        queue.put(error_msg)
        sys.exit(1)
    finally:
        gc.collect()
        torch.npu.empty_cache()


@pytest.mark.parametrize("model", MODEL_NAME)
@pytest.mark.parametrize("VLLM_USE_V1", ["1"])
def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1):
    os.environ["VLLM_USE_V1"] = VLLM_USE_V1
    with monkeypatch.context():
        result_queue: Queue[float] = multiprocessing.Queue()
        # Run the evaluation in a spawned subprocess so NPU memory is fully
        # released between parametrized models.
        p = multiprocessing.Process(target=run_test,
                                    args=(result_queue, model,
                                          MAX_MODEL_LEN[model],
                                          MODEL_TYPE[model], MORE_ARGS[model]))
        p.start()
        p.join()
        result = result_queue.get()
        print(result)
        assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
            f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"
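
For a quick local check outside pytest, the same evaluation can be reproduced with a short standalone script. This is only a sketch, assuming lm-eval and vllm-ascend are installed and enough NPUs are visible; it mirrors the DeepSeek-V2-Lite TP entry above and is not part of this commit:

# Standalone sketch reproducing one accuracy entry (not part of this commit).
import lm_eval

results = lm_eval.simple_evaluate(
    model="vllm",
    model_args=("pretrained=deepseek-ai/DeepSeek-V2-Lite,max_model_len=4096,"
                "dtype=auto,tensor_parallel_size=4,trust_remote_code=True,"
                "enforce_eager=True"),
    tasks="gsm8k",
    batch_size="auto",
)
print(results["results"]["gsm8k"]["exact_match,strict-match"])
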
@pytest.mark.parametrize("max_tokens", [10])
@pytest.mark.parametrize("VLLM_USE_V1", ["1"])
@pytest.mark.parametrize("model", ["Qwen/Qwen2.5-0.5B-Instruct"])
def test_lm_eval_accuracy_dp(model, max_tokens, VLLM_USE_V1):
    os.environ["VLLM_USE_V1"] = VLLM_USE_V1
    # "a+" so the failure path below can read the log back.
    log_file = open("accuracy.log", "a+")
    cmd = [
        "vllm", "serve", model, "--max_model_len", "4096",
        "--tensor_parallel_size", "2", "--data_parallel_size", "2"
    ]
    server_proc = subprocess.Popen(cmd,
                                   stdout=log_file,
                                   stderr=subprocess.DEVNULL)

    try:
        # Poll the health endpoint for up to 300 s while the server starts.
        for _ in range(300):
            try:
                r = requests.get(HEALTH_URL, timeout=1)
                if r.status_code == 200:
                    break
            except requests.exceptions.RequestException:
                pass
            time.sleep(1)
        else:
            log_file.flush()
            log_file.seek(0)
            log_content = log_file.read()
            pytest.fail(
                f"vLLM serve did not become healthy after 300s: {HEALTH_URL}\n"
                f"==== vLLM Serve Log Start ===\n{log_content}\n==== vLLM Serve Log End ==="
            )

        prompt = "bejing is a"
        payload = {
            "model": model,
            "prompt": prompt,
            "max_tokens": max_tokens,
            # Greedy decoding keeps the completion deterministic.
            "temperature": 0.0,
            "top_p": 1.0,
            "seed": 123
        }
        resp = requests.post(COMPLETIONS_URL, json=payload, timeout=30)
        resp.raise_for_status()
        data = resp.json()

        generated = data["choices"][0]["text"].strip()
        expected = "city in north china, it has many famous attractions"
        assert generated == expected, f"Expected `{expected}`, got `{generated}`"

    finally:
        server_proc.send_signal(signal.SIGINT)
        try:
            server_proc.wait(timeout=10)
        except subprocess.TimeoutExpired:
            server_proc.kill()
            server_proc.wait()
        log_file.close()
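
Once such a server is up, the same completion can also be issued through the official OpenAI Python client instead of raw requests. A minimal sketch, assuming `pip install openai` and the DP server above listening on port 8000 (not part of this commit):

# Query the locally served model via the OpenAI-compatible API (sketch only).
from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:8000/v1", api_key="EMPTY")
completion = client.completions.create(
    model="Qwen/Qwen2.5-0.5B-Instruct",
    prompt="bejing is a",
    max_tokens=10,
    temperature=0.0,
)
print(completion.choices[0].text)
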
@pytest.mark.parametrize("max_tokens", [10])
@pytest.mark.parametrize("VLLM_USE_V1", ["1"])
@pytest.mark.parametrize("model", ["Qwen/Qwen3-30B-A3B"])
def test_lm_eval_accuracy_etp(model, max_tokens, VLLM_USE_V1):
    os.environ["VLLM_USE_V1"] = VLLM_USE_V1
    # "a+" so the failure path below can read the log back.
    log_file = open("accuracy.log", "a+")
    cmd = [
        "vllm", "serve", model, "--tensor_parallel_size", "4",
        "--enforce_eager", "True", "--enable_expert_parallel", "True",
        "--additional_config", '{"expert_tensor_parallel_size": "4"}'
    ]
    server_proc = subprocess.Popen(cmd,
                                   stdout=log_file,
                                   stderr=subprocess.DEVNULL)

    try:
        # Poll the health endpoint for up to 300 s while the server starts.
        for _ in range(300):
            try:
                r = requests.get(HEALTH_URL, timeout=1)
                if r.status_code == 200:
                    break
            except requests.exceptions.RequestException:
                pass
            time.sleep(1)
        else:
            log_file.flush()
            log_file.seek(0)
            log_content = log_file.read()
            pytest.fail(
                f"vLLM serve did not become healthy after 300s: {HEALTH_URL}\n"
                f"==== vLLM Serve Log Start ===\n{log_content}\n==== vLLM Serve Log End ==="
            )

        prompt = "bejing is a"
        payload = {
            "model": model,
            "prompt": prompt,
            "max_tokens": max_tokens,
            # Greedy decoding keeps the completion deterministic.
            "temperature": 0.0,
            "top_p": 1.0,
            "seed": 123
        }
        resp = requests.post(COMPLETIONS_URL, json=payload, timeout=30)
        resp.raise_for_status()
        data = resp.json()

        generated = data["choices"][0]["text"].strip()
        expected = "city in china. it is the capital city of"
        assert generated == expected, f"Expected `{expected}`, got `{generated}`"

    finally:
        server_proc.send_signal(signal.SIGINT)
        try:
            server_proc.wait(timeout=10)
        except subprocess.TimeoutExpired:
            server_proc.kill()
            server_proc.wait()
        log_file.close()

tests/e2e/long_term/test_accuracy.py renamed to tests/e2e/long_term/accuracy/accuracy_singlecard.py

Lines changed: 1 addition & 1 deletion
@@ -108,4 +108,4 @@ def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1):
         result = result_queue.get()
         print(result)
         assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
-            f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"
+            f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"

tests/e2e/long_term/test_deepseek_v2_lite_tp2_accuracy.py

Lines changed: 0 additions & 71 deletions
This file was deleted.

0 commit comments