From 28c4483fcdb17b2384c319f215bc85ab78d1a18d Mon Sep 17 00:00:00 2001
From: hfadzxy
Date: Mon, 9 Jun 2025 20:12:26 +0800
Subject: [PATCH 1/3] [CI] Add accuracy ci for DP and EP and TP

Signed-off-by: hfadzxy
---
 .../long_term/accuracy/accuracy_multicard.py | 267 ++++++++++++++++++
 .../accuracy_singlecard.py} | 2 +-
 .../test_deepseek_v2_lite_tp2_accuracy.py | 71 -----
 3 files changed, 268 insertions(+), 72 deletions(-)
 create mode 100644 tests/e2e/long_term/accuracy/accuracy_multicard.py
 rename tests/e2e/long_term/{test_accuracy.py => accuracy/accuracy_singlecard.py} (99%)
 delete mode 100644 tests/e2e/multicard/test_deepseek_v2_lite_tp2_accuracy.py

diff --git a/tests/e2e/long_term/accuracy/accuracy_multicard.py b/tests/e2e/long_term/accuracy/accuracy_multicard.py
new file mode 100644
index 0000000000..81bc541167
--- /dev/null
+++ b/tests/e2e/long_term/accuracy/accuracy_multicard.py
@@ -0,0 +1,267 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
+#
+import gc
+import multiprocessing
+import os
+import signal
+import subprocess
+import sys
+import time
+from multiprocessing import Queue
+
+import lm_eval
+import pytest
+import requests
+import torch
+
+SERVER_HOST = "127.0.0.1"
+SERVER_PORT = 8000
+HEALTH_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/health"
+COMPLETIONS_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/v1/completions"
+
+# pre-trained model path on Hugging Face.
+# Qwen/Qwen2.5-0.5B-Instruct: accuracy test for DP.
+# Qwen/Qwen3-30B-A3B: accuracy test for EP and ETP.
+# deepseek-ai/DeepSeek-V2-Lite: accuracy test for TP.
+MODEL_NAME = ["Qwen/Qwen3-30B-A3B", "deepseek-ai/DeepSeek-V2-Lite"]
+
+# Benchmark configuration mapping models to evaluation tasks.
+# All models in this file are text-only and are evaluated on
+# GSM8K (grade school math reasoning).
+TASK = {
+    "Qwen/Qwen2.5-0.5B-Instruct": "gsm8k",
+    "Qwen/Qwen3-30B-A3B": "gsm8k",
+    "deepseek-ai/DeepSeek-V2-Lite": "gsm8k"
+}
+# Answer validation requiring format consistency.
+FILTER = {
+    "Qwen/Qwen2.5-0.5B-Instruct": "exact_match,strict-match",
+    "Qwen/Qwen3-30B-A3B": "exact_match,strict-match",
+    "deepseek-ai/DeepSeek-V2-Lite": "exact_match,strict-match"
+}
+# 3% relative tolerance for numerical accuracy.
+RTOL = 0.03
+# Baseline accuracy after VLLM optimization.
+EXPECTED_VALUE = {
+    "Qwen/Qwen2.5-0.5B-Instruct": 0.316,
+    "Qwen/Qwen3-30B-A3B": 0.888,
+    "deepseek-ai/DeepSeek-V2-Lite": 0.376
+}
+# Maximum context length configuration for each model.
+MAX_MODEL_LEN = {
+    "Qwen/Qwen2.5-0.5B-Instruct": 4096,
+    "Qwen/Qwen3-30B-A3B": 4096,
+    "deepseek-ai/DeepSeek-V2-Lite": 4096
+}
+# Model types distinguishing text-only and vision-language models.
+MODEL_TYPE = { + "Qwen/Qwen2.5-0.5B-Instruct": "vllm", + "Qwen/Qwen3-30B-A3B": "vllm", + "deepseek-ai/DeepSeek-V2-Lite": "vllm" +} +# wrap prompts in a chat-style template. +APPLY_CHAT_TEMPLATE = { + "Qwen/Qwen2.5-0.5B-Instruct": False, + "Qwen/Qwen3-30B-A3B": False, + "deepseek-ai/DeepSeek-V2-Lite": False +} +# Few-shot examples handling as multi-turn dialogues. +FEWSHOT_AS_MULTITURN = { + "Qwen/Qwen2.5-0.5B-Instruct": False, + "Qwen/Qwen3-30B-A3B": False, + "deepseek-ai/DeepSeek-V2-Lite": False +} +# MORE_ARGS extra CLI args per model +MORE_ARGS = { + "Qwen/Qwen2.5-0.5B-Instruct": + None, + "Qwen/Qwen3-30B-A3B": + "tensor_parallel_size=4,enable_expert_parallel=True,enforce_eager=True", + "deepseek-ai/DeepSeek-V2-Lite": + "tensor_parallel_size=4,trust_remote_code=True,enforce_eager=True" +} + +multiprocessing.set_start_method("spawn", force=True) + + +def run_test(queue, model, max_model_len, model_type, more_args): + try: + if model_type == "vllm-vlm": + model_args = (f"pretrained={model},max_model_len={max_model_len}," + "dtype=auto,max_images=2") + else: + model_args = (f"pretrained={model},max_model_len={max_model_len}," + "dtype=auto") + if more_args is not None: + model_args = f"{model_args},{more_args}" + results = lm_eval.simple_evaluate( + model=model_type, + model_args=model_args, + tasks=TASK[model], + batch_size="auto", + apply_chat_template=APPLY_CHAT_TEMPLATE[model], + fewshot_as_multiturn=FEWSHOT_AS_MULTITURN[model], + ) + result = results["results"][TASK[model]][FILTER[model]] + print("result:", result) + queue.put(result) + except Exception as e: + error_msg = f"{type(e).__name__}: {str(e)}" + queue.put(error_msg) + sys.exit(1) + finally: + gc.collect() + torch.npu.empty_cache() + + +@pytest.mark.parametrize("model", MODEL_NAME) +@pytest.mark.parametrize("VLLM_USE_V1", ["1"]) +def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1): + os.environ["VLLM_USE_V1"] = VLLM_USE_V1 + with monkeypatch.context(): + result_queue: Queue[float] = multiprocessing.Queue() + p = multiprocessing.Process(target=run_test, + args=(result_queue, model, + MAX_MODEL_LEN[model], + MODEL_TYPE[model], MORE_ARGS[model])) + p.start() + p.join() + result = result_queue.get() + print(result) + assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \ + f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}" + + +@pytest.mark.parametrize("max_tokens", [10]) +@pytest.mark.parametrize("VLLM_USE_V1", ["1"]) +@pytest.mark.parametrize("model", ["Qwen/Qwen2.5-0.5B-Instruct"]) +def test_lm_eval_accuracy_dp(model, max_tokens, VLLM_USE_V1): + os.environ["VLLM_USE_V1"] = VLLM_USE_V1 + log_file = open("accuracy.log", "a") + cmd = [ + "vllm", "serve", model, "--max_model_len", "4096", + "--tensor_parallel_size", "2", "--data_parallel_size", "2" + ] + server_proc = subprocess.Popen(cmd, + stdout=log_file, + stderr=subprocess.DEVNULL) + + try: + for _ in range(300): + try: + r = requests.get(HEALTH_URL, timeout=1) + if r.status_code == 200: + break + except requests.exceptions.RequestException: + pass + time.sleep(1) + else: + log_file.flush() + log_file.seek(0) + log_content = log_file.read() + pytest.fail( + f"vLLM serve did not become healthy after 300s: {HEALTH_URL}\n" + f"==== vLLM Serve Log Start ===\n{log_content}\n==== vLLM Serve Log End ===" + ) + + prompt = "bejing is a" + payload = { + "prompt": prompt, + "max_tokens": max_tokens, + "sampling_params": { + "temperature": 0.0, + "top_p": 1.0, + "seed": 123 + } + } + resp = 
requests.post(COMPLETIONS_URL, json=payload, timeout=30) + resp.raise_for_status() + data = resp.json() + + generated = data["choices"][0]["text"].strip() + expected = "city in north china, it has many famous attractions" + assert generated == expected, f"Expected `{expected}`, got `{generated}`" + + finally: + server_proc.send_signal(signal.SIGINT) + try: + server_proc.wait(timeout=10) + except subprocess.TimeoutExpired: + server_proc.kill() + server_proc.wait() + + +@pytest.mark.parametrize("max_tokens", [10]) +@pytest.mark.parametrize("VLLM_USE_V1", ["1"]) +@pytest.mark.parametrize("model", ["Qwen/Qwen3-30B-A3B"]) +def test_lm_eval_accuracy_etp(model, max_tokens, VLLM_USE_V1): + os.environ["VLLM_USE_V1"] = VLLM_USE_V1 + log_file = open("accuracy.log", "a") + cmd = [ + "vllm", "serve", model, "--tensor_parallel_size", "4", + "--enforce_eager", "True", "--enable_expert_parallel", "True", + "--additional_config", '{"expert_tensor_parallel_size": "4"}' + ] + server_proc = subprocess.Popen(cmd, + stdout=log_file, + stderr=subprocess.DEVNULL) + + try: + for _ in range(300): + try: + r = requests.get(HEALTH_URL, timeout=1) + if r.status_code == 200: + break + except requests.exceptions.RequestException: + pass + time.sleep(1) + else: + log_file.flush() + log_file.seek(0) + log_content = log_file.read() + pytest.fail( + f"vLLM serve did not become healthy after 300s: {HEALTH_URL}\n" + f"==== vLLM Serve Log Start ===\n{log_content}\n==== vLLM Serve Log End ===" + ) + + prompt = "bejing is a" + payload = { + "prompt": prompt, + "max_tokens": max_tokens, + "sampling_params": { + "temperature": 0.0, + "top_p": 1.0, + "seed": 123 + } + } + resp = requests.post(COMPLETIONS_URL, json=payload, timeout=30) + resp.raise_for_status() + data = resp.json() + + generated = data["choices"][0]["text"].strip() + expected = "city in china. it is the capital city of" + assert generated == expected, f"Expected `{expected}`, got `{generated}`" + + finally: + server_proc.send_signal(signal.SIGINT) + try: + server_proc.wait(timeout=10) + except subprocess.TimeoutExpired: + server_proc.kill() + server_proc.wait() diff --git a/tests/e2e/long_term/test_accuracy.py b/tests/e2e/long_term/accuracy/accuracy_singlecard.py similarity index 99% rename from tests/e2e/long_term/test_accuracy.py rename to tests/e2e/long_term/accuracy/accuracy_singlecard.py index a9d9619100..51a67e4a77 100644 --- a/tests/e2e/long_term/test_accuracy.py +++ b/tests/e2e/long_term/accuracy/accuracy_singlecard.py @@ -108,4 +108,4 @@ def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1): result = result_queue.get() print(result) assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \ - f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}" + f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}" \ No newline at end of file diff --git a/tests/e2e/multicard/test_deepseek_v2_lite_tp2_accuracy.py b/tests/e2e/multicard/test_deepseek_v2_lite_tp2_accuracy.py deleted file mode 100644 index 3a9068ff6b..0000000000 --- a/tests/e2e/multicard/test_deepseek_v2_lite_tp2_accuracy.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. -# Copyright 2023 The vLLM team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# This file is a part of the vllm-ascend project. -# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py -# - -import gc -import multiprocessing -from multiprocessing import Queue - -import lm_eval -import pytest -import torch - -# pre-trained model path on Hugging Face. -MODELS = ["deepseek-ai/DeepSeek-V2-Lite"] -# Math reasoning benchmark (Grade School Math 8K). -TASK = "gsm8k" -# Answer validation requiring format consistency. -FILTER = "exact_match,strict-match" -# 3% relative tolerance for numerical accuracy. -RTOL = 0.03 -# Baseline accuracy after VLLM optimization. -EXPECTED_VALUE = 0.3843821076573162 - - -def run_test(model_name, queue, more_args=None): - model_args = f"pretrained={model_name},max_model_len=4096,trust_remote_code=True,tensor_parallel_size=4,enforce_eager=True" - if more_args is not None: - model_args = f"{model_args},{more_args}" - results = lm_eval.simple_evaluate( - model="vllm", - model_args=model_args, - tasks=TASK, - batch_size="auto", - ) - result = results["results"][TASK][FILTER] - print(100 * "*", "\nThe accuracy test result:", result) - queue.put(result) - del results - torch.npu.empty_cache() - gc.collect() - - -@pytest.mark.parametrize("model", MODELS) -def test_lm_eval_accuracy(model, monkeypatch: pytest.MonkeyPatch): - with monkeypatch.context(): - result_queue: Queue[float] = multiprocessing.Queue() - p = multiprocessing.Process(target=run_test, - args=( - model, - result_queue, - )) - p.start() - p.join() - result = result_queue.get() - assert (EXPECTED_VALUE - RTOL < result < EXPECTED_VALUE + RTOL), \ - f"Expected: {EXPECTED_VALUE}±{RTOL} | Measured: {result}" From 2b598fd9b7407e3187d909f8d22eb01e4ebadb8b Mon Sep 17 00:00:00 2001 From: hfadzxy Date: Thu, 12 Jun 2025 09:43:18 +0800 Subject: [PATCH 2/3] [CI] Add accuracy ci for DP and EP and TP and ETP Signed-off-by: hfadzxy --- .../long_term/accuracy/accuracy_multicard.py | 24 ++++++++----------- .../long_term/accuracy/accuracy_singlecard.py | 22 ++++++++++------- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/tests/e2e/long_term/accuracy/accuracy_multicard.py b/tests/e2e/long_term/accuracy/accuracy_multicard.py index 81bc541167..f9a8b1306e 100644 --- a/tests/e2e/long_term/accuracy/accuracy_multicard.py +++ b/tests/e2e/long_term/accuracy/accuracy_multicard.py @@ -98,6 +98,7 @@ } multiprocessing.set_start_method("spawn", force=True) +os.environ["VLLM_USE_V1"] = "1" def run_test(queue, model, max_model_len, model_type, more_args): @@ -131,9 +132,7 @@ def run_test(queue, model, max_model_len, model_type, more_args): @pytest.mark.parametrize("model", MODEL_NAME) -@pytest.mark.parametrize("VLLM_USE_V1", ["1"]) -def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1): - os.environ["VLLM_USE_V1"] = VLLM_USE_V1 +def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model): with monkeypatch.context(): result_queue: Queue[float] = multiprocessing.Queue() p = multiprocessing.Process(target=run_test, @@ -149,11 +148,9 @@ def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1): 
@pytest.mark.parametrize("max_tokens", [10]) -@pytest.mark.parametrize("VLLM_USE_V1", ["1"]) @pytest.mark.parametrize("model", ["Qwen/Qwen2.5-0.5B-Instruct"]) -def test_lm_eval_accuracy_dp(model, max_tokens, VLLM_USE_V1): - os.environ["VLLM_USE_V1"] = VLLM_USE_V1 - log_file = open("accuracy.log", "a") +def test_lm_eval_accuracy_dp(model, max_tokens): + log_file = open("accuracy_pd.log", "a+") cmd = [ "vllm", "serve", model, "--max_model_len", "4096", "--tensor_parallel_size", "2", "--data_parallel_size", "2" @@ -208,15 +205,14 @@ def test_lm_eval_accuracy_dp(model, max_tokens, VLLM_USE_V1): @pytest.mark.parametrize("max_tokens", [10]) -@pytest.mark.parametrize("VLLM_USE_V1", ["1"]) @pytest.mark.parametrize("model", ["Qwen/Qwen3-30B-A3B"]) -def test_lm_eval_accuracy_etp(model, max_tokens, VLLM_USE_V1): - os.environ["VLLM_USE_V1"] = VLLM_USE_V1 - log_file = open("accuracy.log", "a") +def test_lm_eval_accuracy_etp(model, max_tokens): + log_file = open("accuracy_etp.log", "a+") cmd = [ - "vllm", "serve", model, "--tensor_parallel_size", "4", - "--enforce_eager", "True", "--enable_expert_parallel", "True", - "--additional_config", '{"expert_tensor_parallel_size": "4"}' + "vllm", "serve", model, "--max_model_len", "4096", + "--tensor_parallel_size", "4", "--enforce_eager", + "--enable_expert_parallel", "--additional_config", + '{"expert_tensor_parallel_size": "4"}' ] server_proc = subprocess.Popen(cmd, stdout=log_file, diff --git a/tests/e2e/long_term/accuracy/accuracy_singlecard.py b/tests/e2e/long_term/accuracy/accuracy_singlecard.py index 51a67e4a77..c2ec2c0ba9 100644 --- a/tests/e2e/long_term/accuracy/accuracy_singlecard.py +++ b/tests/e2e/long_term/accuracy/accuracy_singlecard.py @@ -45,7 +45,7 @@ # Baseline accuracy after VLLM optimization. EXPECTED_VALUE = { "Qwen/Qwen2.5-0.5B-Instruct": 0.316, - "Qwen/Qwen2.5-VL-3B-Instruct": 0.541 + "Qwen/Qwen2.5-VL-3B-Instruct": 0.566 } # Maximum context length configuration for each model. MAX_MODEL_LEN = { @@ -61,21 +61,28 @@ APPLY_CHAT_TEMPLATE = {"vllm": False, "vllm-vlm": True} # Few-shot examples handling as multi-turn dialogues. 
FEWSHOT_AS_MULTITURN = {"vllm": False, "vllm-vlm": True} +# batch_size +BATCH_SIZE = { + "Qwen/Qwen2.5-0.5B-Instruct": "auto", + "Qwen/Qwen2.5-VL-3B-Instruct": 1 +} + +multiprocessing.set_start_method("spawn", force=True) def run_test(queue, model, max_model_len, model_type): try: if model_type == "vllm-vlm": model_args = (f"pretrained={model},max_model_len={max_model_len}," - "dtype=auto,max_images=2") + "tensor_parallel_size=1,dtype=auto,max_images=2") else: model_args = (f"pretrained={model},max_model_len={max_model_len}," - "dtype=auto") + "tensor_parallel_size=1,dtype=auto") results = lm_eval.simple_evaluate( model=model_type, model_args=model_args, tasks=TASK[model], - batch_size="auto", + batch_size=BATCH_SIZE[model], apply_chat_template=APPLY_CHAT_TEMPLATE[model_type], fewshot_as_multiturn=FEWSHOT_AS_MULTITURN[model_type], ) @@ -93,9 +100,6 @@ def run_test(queue, model, max_model_len, model_type): @pytest.mark.parametrize("model", MODEL_NAME) @pytest.mark.parametrize("VLLM_USE_V1", ["0", "1"]) def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1): - if model == "Qwen/Qwen2.5-VL-3B-Instruct" and VLLM_USE_V1 == "1": - pytest.skip( - "Qwen2.5-VL-3B-Instruct is not supported when VLLM_USE_V1=1") with monkeypatch.context() as m: m.setenv("VLLM_USE_V1", VLLM_USE_V1) result_queue: Queue[float] = multiprocessing.Queue() @@ -106,6 +110,8 @@ def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1): p.start() p.join() result = result_queue.get() + if isinstance(result, Exception): + pytest.fail(f"Subprocess failed with exception: {str(result)}") print(result) assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \ - f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}" \ No newline at end of file + f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}" From 9477ee1a00246ff6fbc2c64ca445a9f0b795e28b Mon Sep 17 00:00:00 2001 From: hfadzxy Date: Wed, 9 Jul 2025 16:43:52 +0800 Subject: [PATCH 3/3] mend Signed-off-by: hfadzxy --- .github/workflows/vllm_ascend_test_long_term.yaml | 6 +++--- tests/e2e/long_term/accuracy/accuracy_multicard.py | 4 +--- tests/e2e/long_term/accuracy/accuracy_singlecard.py | 6 ++---- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/.github/workflows/vllm_ascend_test_long_term.yaml b/.github/workflows/vllm_ascend_test_long_term.yaml index 258d969224..808ab24862 100644 --- a/.github/workflows/vllm_ascend_test_long_term.yaml +++ b/.github/workflows/vllm_ascend_test_long_term.yaml @@ -96,8 +96,8 @@ jobs: - name: Run vllm-project/vllm-ascend long term test run: | if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then - pytest -sv tests/e2e/long_term/test_accuracy.py - # else + pytest -sv tests/e2e/long_term/accuracy/accuracy_singlecard.py + else # accuracy test multi card - # VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/long_term/test_deepseek_v2_lite_tp2_accuracy.py + pytest -sv tests/e2e/long_term/accuracy/accuracy_multicard.py fi diff --git a/tests/e2e/long_term/accuracy/accuracy_multicard.py b/tests/e2e/long_term/accuracy/accuracy_multicard.py index f9a8b1306e..94e3724258 100644 --- a/tests/e2e/long_term/accuracy/accuracy_multicard.py +++ b/tests/e2e/long_term/accuracy/accuracy_multicard.py @@ -18,7 +18,6 @@ # import gc import multiprocessing -import os import signal import subprocess import sys @@ -61,7 +60,7 @@ EXPECTED_VALUE = { "Qwen/Qwen2.5-0.5B-Instruct": 0.316, "Qwen/Qwen3-30B-A3B": 0.888, - "deepseek-ai/DeepSeek-V2-Lite": 0.376 + 
"deepseek-ai/DeepSeek-V2-Lite": 0.375 } # Maximum context length configuration for each model. MAX_MODEL_LEN = { @@ -98,7 +97,6 @@ } multiprocessing.set_start_method("spawn", force=True) -os.environ["VLLM_USE_V1"] = "1" def run_test(queue, model, max_model_len, model_type, more_args): diff --git a/tests/e2e/long_term/accuracy/accuracy_singlecard.py b/tests/e2e/long_term/accuracy/accuracy_singlecard.py index c2ec2c0ba9..2860dd56e7 100644 --- a/tests/e2e/long_term/accuracy/accuracy_singlecard.py +++ b/tests/e2e/long_term/accuracy/accuracy_singlecard.py @@ -98,10 +98,8 @@ def run_test(queue, model, max_model_len, model_type): @pytest.mark.parametrize("model", MODEL_NAME) -@pytest.mark.parametrize("VLLM_USE_V1", ["0", "1"]) -def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1): - with monkeypatch.context() as m: - m.setenv("VLLM_USE_V1", VLLM_USE_V1) +def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model): + with monkeypatch.context(): result_queue: Queue[float] = multiprocessing.Queue() p = multiprocessing.Process(target=run_test, args=(result_queue, model,