diff --git a/.github/workflows/accuracy_test.yaml b/.github/workflows/accuracy_test.yaml
index 7140f262f7..74296c6fb6 100644
--- a/.github/workflows/accuracy_test.yaml
+++ b/.github/workflows/accuracy_test.yaml
@@ -70,6 +70,8 @@ jobs:
             runner: linux-aarch64-a2-1
           - model_name: Qwen3-30B-A3B
             runner: linux-aarch64-a2-2
+          - model_name: DeepSeek-V2-Lite
+            runner: linux-aarch64-a2-2
       fail-fast: false
     name: ${{ matrix.model_name }} accuracy
 
@@ -200,9 +202,8 @@ jobs:
           markdown_name="${model_base_name}"
           echo "markdown_name=$markdown_name" >> $GITHUB_OUTPUT
           mkdir -p ./benchmarks/accuracy
-          pytest -sv ./tests/e2e/singlecard/models/test_lm_eval_correctness.py \
-          --config ./tests/e2e/singlecard/models/configs/${{ matrix.model_name }}.yaml \
-          --report_output ./benchmarks/accuracy/${model_base_name}.md
+          pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \
+          --config ./tests/e2e/models/configs/${{ matrix.model_name }}.yaml
 
       - name: Generate step summary
         if: ${{ always() }}
@@ -312,7 +313,7 @@ jobs:
             head: `vllm-ascend-ci:${{ env.BRANCH_NAME }}`,
             base: '${{ github.event.inputs.vllm-ascend-version }}',
             title: `[Doc] Update accuracy reports for ${{ github.event.inputs.vllm-ascend-version }}`,
-            body: `The accuracy results running on NPU Altlas A2 have changed, updating reports for: All models (Qwen/Qwen3-30B-A3B, Qwen2.5-VL-7B-Instruct, Qwen3-8B-Base)
+            body: `The accuracy results running on NPU Atlas A2 have changed, updating reports for: All models (Qwen3-30B-A3B, Qwen2.5-VL-7B-Instruct, Qwen3-8B-Base, DeepSeek-V2-Lite)
 
             - [Workflow run][1]
 
diff --git a/.github/workflows/vllm_ascend_test.yaml b/.github/workflows/vllm_ascend_test.yaml
index d46b4a9fd4..178eac8dbc 100644
--- a/.github/workflows/vllm_ascend_test.yaml
+++ b/.github/workflows/vllm_ascend_test.yaml
@@ -211,8 +211,7 @@ jobs:
             --ignore=tests/e2e/singlecard/test_embedding.py \
             --ignore=tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py \
             --ignore=tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py \
-            --ignore=tests/e2e/singlecard/test_offline_inference_310p.py \
-            --ignore=tests/e2e/singlecard/models/test_lm_eval_correctness.py
+            --ignore=tests/e2e/singlecard/test_offline_inference_310p.py
   e2e-2-cards:
     needs: [e2e]
     if: ${{ needs.e2e.result == 'success' }}
diff --git a/.github/workflows/vllm_ascend_test_long_term.yaml b/.github/workflows/vllm_ascend_test_long_term.yaml
deleted file mode 100644
index 0dfa7e3094..0000000000
--- a/.github/workflows/vllm_ascend_test_long_term.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-#
-# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
-# This file is a part of the vllm-ascend project.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-name: 'e2e test / long-term-test'
-
-on:
-  schedule:
-    # Runs at 23:00 UTC (7:00 AM Beijing) every day
-    - cron: '0 23 * * *'
-  pull_request:
-    types: [ labeled ]
-
-# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
-# declared as "shell: bash -el {0}" on steps that need to be properly activated.
-# It's used to activate ascend-toolkit environment variables.
-defaults:
-  run:
-    shell: bash -el {0}
-
-# only cancel in-progress runs of the same workflow
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  long-term-test:
-    # long-term-test will be triggered when tag 'long-term-test' & 'ready-for-test' or schedule job
-    if: ${{ contains(github.event.pull_request.labels.*.name, 'long-term-test') && contains(github.event.pull_request.labels.*.name, 'ready-for-test') || github.event_name == 'schedule' }}
-    strategy:
-      max-parallel: 2
-      matrix:
-        os: [linux-aarch64-a2-1, linux-aarch64-a2-2]
-        vllm_version: [main, v0.10.0]
-    name: vLLM Ascend long term test
-    runs-on: ${{ matrix.os }}
-    container:
-      image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11
-      env:
-        VLLM_LOGGING_LEVEL: ERROR
-        VLLM_USE_MODELSCOPE: True
-    steps:
-      - name: Check npu and CANN info
-        run: |
-          npu-smi info
-          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
-
-      - name: Config mirrors
-        run: |
-          sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
-          pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
-          pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
-          apt-get update -y
-          apt install git -y
-
-      - name: Checkout vllm-project/vllm-ascend repo
-        uses: actions/checkout@v4
-
-      - name: Install system dependencies
-        run: |
-          apt-get -y install `cat packages.txt`
-          apt-get -y install gcc g++ cmake libnuma-dev
-
-      - name: Checkout vllm-project/vllm repo
-        uses: actions/checkout@v4
-        with:
-          repository: vllm-project/vllm
-          ref: ${{ matrix.vllm_version }}
-          path: ./vllm-empty
-
-      - name: Install vllm-project/vllm from source
-        working-directory: ./vllm-empty
-        run: |
-          VLLM_TARGET_DEVICE=empty pip install -e .
-
-      - name: Install vllm-project/vllm-ascend
-        env:
-          PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
-        run: |
-          pip install -r requirements-dev.txt
-          pip install -v -e .
-
-      - name: Run vllm-project/vllm-ascend long term test
-        run: |
-          if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
-            pytest -sv tests/e2e/long_term/accuracy/accuracy_singlecard.py
-          else
-            # accuracy test multi card
-            pytest -sv tests/e2e/long_term/accuracy/accuracy_multicard.py
-          fi
diff --git a/tests/e2e/long_term/accuracy/accuracy_multicard.py b/tests/e2e/long_term/accuracy/accuracy_multicard.py
deleted file mode 100644
index 4479c4bf99..0000000000
--- a/tests/e2e/long_term/accuracy/accuracy_multicard.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#
-# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
-# Copyright 2023 The vLLM team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# This file is a part of the vllm-ascend project.
-# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
-#
-import gc
-import multiprocessing
-import sys
-from multiprocessing import Queue
-
-import lm_eval
-import pytest
-import torch
-
-SERVER_HOST = "127.0.0.1"
-SERVER_PORT = 8000
-HEALTH_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/health"
-COMPLETIONS_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/v1/completions"
-
-# pre-trained model path on Hugging Face.
-# Qwen/Qwen2.5-0.5B-Instruct: accuracy test for DP.
-# Qwen/Qwen3-30B-A3B: accuracy test for EP and DP.
-# deepseek-ai/DeepSeek-V2-Lite: accuracy test for TP.
-MODEL_NAME = ["Qwen/Qwen3-30B-A3B", "deepseek-ai/DeepSeek-V2-Lite"]
-
-# Benchmark configuration mapping models to evaluation tasks:
-# - Text model: GSM8K (grade school math reasoning)
-# - Vision-language model: MMMU Art & Design validation (multimodal understanding)
-TASK = {
-    "Qwen/Qwen2.5-0.5B-Instruct": "gsm8k",
-    "Qwen/Qwen3-30B-A3B": "gsm8k",
-    "deepseek-ai/DeepSeek-V2-Lite": "gsm8k"
-}
-# Answer validation requiring format consistency.
-FILTER = {
-    "Qwen/Qwen2.5-0.5B-Instruct": "exact_match,strict-match",
-    "Qwen/Qwen3-30B-A3B": "exact_match,strict-match",
-    "deepseek-ai/DeepSeek-V2-Lite": "exact_match,strict-match"
-}
-# 3% relative tolerance for numerical accuracy.
-RTOL = 0.03
-# Baseline accuracy after VLLM optimization.
-EXPECTED_VALUE = {
-    "Qwen/Qwen2.5-0.5B-Instruct": 0.316,
-    "Qwen/Qwen3-30B-A3B": 0.888,
-    "deepseek-ai/DeepSeek-V2-Lite": 0.375
-}
-# Maximum context length configuration for each model.
-MAX_MODEL_LEN = {
-    "Qwen/Qwen2.5-0.5B-Instruct": 4096,
-    "Qwen/Qwen3-30B-A3B": 4096,
-    "deepseek-ai/DeepSeek-V2-Lite": 4096
-}
-# Model types distinguishing text-only and vision-language models.
-MODEL_TYPE = {
-    "Qwen/Qwen2.5-0.5B-Instruct": "vllm",
-    "Qwen/Qwen3-30B-A3B": "vllm",
-    "deepseek-ai/DeepSeek-V2-Lite": "vllm"
-}
-# wrap prompts in a chat-style template.
-APPLY_CHAT_TEMPLATE = {
-    "Qwen/Qwen2.5-0.5B-Instruct": False,
-    "Qwen/Qwen3-30B-A3B": False,
-    "deepseek-ai/DeepSeek-V2-Lite": False
-}
-# Few-shot examples handling as multi-turn dialogues.
-FEWSHOT_AS_MULTITURN = {
-    "Qwen/Qwen2.5-0.5B-Instruct": False,
-    "Qwen/Qwen3-30B-A3B": False,
-    "deepseek-ai/DeepSeek-V2-Lite": False
-}
-# MORE_ARGS extra CLI args per model
-MORE_ARGS = {
-    "Qwen/Qwen2.5-0.5B-Instruct":
-    None,
-    "Qwen/Qwen3-30B-A3B":
-    "tensor_parallel_size=2,enable_expert_parallel=True,enforce_eager=True",
-    "deepseek-ai/DeepSeek-V2-Lite":
-    "tensor_parallel_size=2,trust_remote_code=True,enforce_eager=True"
-}
-
-multiprocessing.set_start_method("spawn", force=True)
-
-
-def run_test(queue, model, max_model_len, model_type, more_args):
-    try:
-        if model_type == "vllm-vlm":
-            model_args = (f"pretrained={model},max_model_len={max_model_len},"
-                          "dtype=auto,max_images=2")
-        else:
-            model_args = (f"pretrained={model},max_model_len={max_model_len},"
-                          "dtype=auto")
-        if more_args is not None:
-            model_args = f"{model_args},{more_args}"
-        results = lm_eval.simple_evaluate(
-            model=model_type,
-            model_args=model_args,
-            tasks=TASK[model],
-            batch_size="auto",
-            apply_chat_template=APPLY_CHAT_TEMPLATE[model],
-            fewshot_as_multiturn=FEWSHOT_AS_MULTITURN[model],
-        )
-        result = results["results"][TASK[model]][FILTER[model]]
-        print("result:", result)
-        queue.put(result)
-    except Exception as e:
-        error_msg = f"{type(e).__name__}: {str(e)}"
-        queue.put(error_msg)
-        sys.exit(1)
-    finally:
-        gc.collect()
-        torch.npu.empty_cache()
-
-
-@pytest.mark.parametrize("model", MODEL_NAME)
-def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model):
-    with monkeypatch.context():
-        result_queue: Queue[float] = multiprocessing.Queue()
-        p = multiprocessing.Process(target=run_test,
-                                    args=(result_queue, model,
-                                          MAX_MODEL_LEN[model],
-                                          MODEL_TYPE[model], MORE_ARGS[model]))
-        p.start()
-        p.join()
-        result = result_queue.get()
-        print(result)
-        assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
-            f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"
-
-
-DP_DENSCE_MODEL = ["Qwen/Qwen2.5-0.5B-Instruct"]
-DP_MOE_MOEDL = ["Qwen/Qwen3-30B-A3B"]
-
-DP_MORE_ARGS = {
-    "Qwen/Qwen2.5-0.5B-Instruct":
-    "tensor_parallel_size=2,data_parallel_size=2",
-    "Qwen/Qwen3-30B-A3B":
-    "tensor_parallel_size=2,data_parallel_size=2,enable_expert_parallel=True,max_model_len=1024,enforce_eager=True",
-}
-
-
-@pytest.mark.parametrize("model", DP_DENSCE_MODEL)
-def test_lm_eval_accuracy_dp(model):
-    result_queue: Queue[float] = multiprocessing.Queue()
-    p = multiprocessing.Process(target=run_test,
-                                args=(result_queue, model,
-                                      MAX_MODEL_LEN[model], MODEL_TYPE[model],
-                                      DP_MORE_ARGS[model]))
-    p.start()
-    p.join()
-    result = result_queue.get()
-    print(result)
-    assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
-        f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"
diff --git a/tests/e2e/long_term/accuracy/accuracy_singlecard.py b/tests/e2e/long_term/accuracy/accuracy_singlecard.py
deleted file mode 100644
index 2860dd56e7..0000000000
--- a/tests/e2e/long_term/accuracy/accuracy_singlecard.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#
-# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
-# Copyright 2023 The vLLM team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# This file is a part of the vllm-ascend project.
-# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
-#
-
-import gc
-import multiprocessing
-import sys
-from multiprocessing import Queue
-
-import lm_eval
-import pytest
-import torch
-
-# pre-trained model path on Hugging Face.
-MODEL_NAME = ["Qwen/Qwen2.5-0.5B-Instruct", "Qwen/Qwen2.5-VL-3B-Instruct"]
-# Benchmark configuration mapping models to evaluation tasks:
-# - Text model: GSM8K (grade school math reasoning)
-# - Vision-language model: MMMU Art & Design validation (multimodal understanding)
-TASK = {
-    "Qwen/Qwen2.5-0.5B-Instruct": "gsm8k",
-    "Qwen/Qwen2.5-VL-3B-Instruct": "mmmu_val_art_and_design"
-}
-# Answer validation requiring format consistency.
-FILTER = {
-    "Qwen/Qwen2.5-0.5B-Instruct": "exact_match,strict-match",
-    "Qwen/Qwen2.5-VL-3B-Instruct": "acc,none"
-}
-# 3% relative tolerance for numerical accuracy.
-RTOL = 0.03
-# Baseline accuracy after VLLM optimization.
-EXPECTED_VALUE = {
-    "Qwen/Qwen2.5-0.5B-Instruct": 0.316,
-    "Qwen/Qwen2.5-VL-3B-Instruct": 0.566
-}
-# Maximum context length configuration for each model.
-MAX_MODEL_LEN = {
-    "Qwen/Qwen2.5-0.5B-Instruct": 4096,
-    "Qwen/Qwen2.5-VL-3B-Instruct": 8192
-}
-# Model types distinguishing text-only and vision-language models.
-MODEL_TYPE = {
-    "Qwen/Qwen2.5-0.5B-Instruct": "vllm",
-    "Qwen/Qwen2.5-VL-3B-Instruct": "vllm-vlm"
-}
-# wrap prompts in a chat-style template.
-APPLY_CHAT_TEMPLATE = {"vllm": False, "vllm-vlm": True}
-# Few-shot examples handling as multi-turn dialogues.
-FEWSHOT_AS_MULTITURN = {"vllm": False, "vllm-vlm": True}
-# batch_size
-BATCH_SIZE = {
-    "Qwen/Qwen2.5-0.5B-Instruct": "auto",
-    "Qwen/Qwen2.5-VL-3B-Instruct": 1
-}
-
-multiprocessing.set_start_method("spawn", force=True)
-
-
-def run_test(queue, model, max_model_len, model_type):
-    try:
-        if model_type == "vllm-vlm":
-            model_args = (f"pretrained={model},max_model_len={max_model_len},"
-                          "tensor_parallel_size=1,dtype=auto,max_images=2")
-        else:
-            model_args = (f"pretrained={model},max_model_len={max_model_len},"
-                          "tensor_parallel_size=1,dtype=auto")
-        results = lm_eval.simple_evaluate(
-            model=model_type,
-            model_args=model_args,
-            tasks=TASK[model],
-            batch_size=BATCH_SIZE[model],
-            apply_chat_template=APPLY_CHAT_TEMPLATE[model_type],
-            fewshot_as_multiturn=FEWSHOT_AS_MULTITURN[model_type],
-        )
-        result = results["results"][TASK[model]][FILTER[model]]
-        print("result:", result)
-        queue.put(result)
-    except Exception as e:
-        queue.put(e)
-        sys.exit(1)
-    finally:
-        gc.collect()
-        torch.npu.empty_cache()
-
-
-@pytest.mark.parametrize("model", MODEL_NAME)
-def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model):
-    with monkeypatch.context():
-        result_queue: Queue[float] = multiprocessing.Queue()
-        p = multiprocessing.Process(target=run_test,
-                                    args=(result_queue, model,
-                                          MAX_MODEL_LEN[model],
-                                          MODEL_TYPE[model]))
-        p.start()
-        p.join()
-        result = result_queue.get()
-        if isinstance(result, Exception):
-            pytest.fail(f"Subprocess failed with exception: {str(result)}")
-        print(result)
-        assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
-            f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"
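The two hard-coded scripts above are superseded by the YAML-driven tests/e2e/models/test_lm_eval_correctness.py: each config pins baseline metric values, and the test compares what lm_eval measures against them. Below is a rough sketch of that comparison, reusing the tolerance from the deleted scripts; `within_tolerance` is an illustrative helper name, not the new test's actual code.

```python
# Illustrative only: the per-metric check a config-driven accuracy test makes,
# mirroring the tolerance the deleted accuracy_*card.py scripts applied
# (EXPECTED_VALUE ± RTOL, i.e. an absolute band of 0.03 around the baseline).
RTOL = 0.03  # value copied from the deleted scripts; the new test may differ


def within_tolerance(measured: float, expected: float, rtol: float = RTOL) -> bool:
    """True if a measured lm_eval metric stays inside the pinned baseline band."""
    return expected - rtol < measured < expected + rtol


# e.g. gsm8k exact_match,strict-match for deepseek-ai/DeepSeek-V2-Lite
assert within_tolerance(measured=0.372, expected=0.375)
```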
diff --git a/tests/e2e/models/configs/DeepSeek-V2-Lite.yaml b/tests/e2e/models/configs/DeepSeek-V2-Lite.yaml
new file mode 100644
index 0000000000..7df0544d63
--- /dev/null
+++ b/tests/e2e/models/configs/DeepSeek-V2-Lite.yaml
@@ -0,0 +1,13 @@
+model_name: "deepseek-ai/DeepSeek-V2-Lite"
+tasks:
+- name: "gsm8k"
+  metrics:
+  - name: "exact_match,strict-match"
+    value: 0.375
+  - name: "exact_match,flexible-extract"
+    value: 0.375
+tensor_parallel_size: 2
+apply_chat_template: False
+fewshot_as_multiturn: False
+trust_remote_code: True
+enforce_eager: True
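Keys in this config feed the lm_eval model_args that build_model_args assembles (the hunk further below adds `enforce_eager` to the forwarded keys). The following is a simplified, string-based sketch of that translation under stated assumptions; the real helper builds a dict and handles more options, so treat the names here as illustrative.

```python
# Simplified sketch of turning a config like DeepSeek-V2-Lite.yaml into
# lm_eval model_args; the real build_model_args in
# tests/e2e/models/test_lm_eval_correctness.py differs in detail.
import yaml

FORWARDED_KEYS = [
    "max_images", "gpu_memory_utilization", "enable_expert_parallel",
    "tensor_parallel_size", "enforce_eager"
]  # list taken from the hunk below; other keys (e.g. trust_remote_code) assumed handled elsewhere


def sketch_model_args(config_path: str) -> str:
    with open(config_path, encoding="utf-8") as f:
        cfg = yaml.safe_load(f)
    parts = [f"pretrained={cfg['model_name']}"]
    for key in FORWARDED_KEYS:
        if cfg.get(key) is not None:
            parts.append(f"{key}={cfg[key]}")
    return ",".join(parts)


# -> "pretrained=deepseek-ai/DeepSeek-V2-Lite,tensor_parallel_size=2,enforce_eager=True"
print(sketch_model_args("tests/e2e/models/configs/DeepSeek-V2-Lite.yaml"))
```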
diff --git a/tests/e2e/singlecard/models/configs/Qwen2.5-VL-7B-Instruct.yaml b/tests/e2e/models/configs/Qwen2.5-VL-7B-Instruct.yaml
similarity index 100%
rename from tests/e2e/singlecard/models/configs/Qwen2.5-VL-7B-Instruct.yaml
rename to tests/e2e/models/configs/Qwen2.5-VL-7B-Instruct.yaml
diff --git a/tests/e2e/singlecard/models/configs/Qwen3-30B-A3B.yaml b/tests/e2e/models/configs/Qwen3-30B-A3B.yaml
similarity index 100%
rename from tests/e2e/singlecard/models/configs/Qwen3-30B-A3B.yaml
rename to tests/e2e/models/configs/Qwen3-30B-A3B.yaml
diff --git a/tests/e2e/singlecard/models/configs/Qwen3-8B-Base.yaml b/tests/e2e/models/configs/Qwen3-8B-Base.yaml
similarity index 100%
rename from tests/e2e/singlecard/models/configs/Qwen3-8B-Base.yaml
rename to tests/e2e/models/configs/Qwen3-8B-Base.yaml
diff --git a/tests/e2e/singlecard/models/configs/accuracy.txt b/tests/e2e/models/configs/accuracy.txt
similarity index 100%
rename from tests/e2e/singlecard/models/configs/accuracy.txt
rename to tests/e2e/models/configs/accuracy.txt
diff --git a/tests/e2e/singlecard/models/conftest.py b/tests/e2e/models/conftest.py
similarity index 53%
rename from tests/e2e/singlecard/models/conftest.py
rename to tests/e2e/models/conftest.py
index 2b25c1a929..a75659f4f4 100644
--- a/tests/e2e/singlecard/models/conftest.py
+++ b/tests/e2e/models/conftest.py
@@ -21,14 +21,14 @@ def pytest_addoption(parser):
     parser.addoption(
         "--config",
         action="store",
-        default="./tests/e2e/singlecard/models/configs/Qwen3-8B-Base.yaml",
+        default="./tests/e2e/models/configs/Qwen3-8B-Base.yaml",
         help="Path to the model config YAML file",
     )
     parser.addoption(
-        "--report_output",
+        "--report-dir",
         action="store",
-        default="./benchmarks/accuracy/Qwen3-8B-Base.md",
-        help="Path to the report output file",
+        default="./benchmarks/accuracy",
+        help="Directory to store report files",
     )
 
 
@@ -49,25 +49,24 @@ def config(pytestconfig):
 
 
 @pytest.fixture(scope="session")
-def report_output(pytestconfig):
-    return pytestconfig.getoption("--report_output")
+def report_dir(pytestconfig):
+    return pytestconfig.getoption("report_dir")
 
 
 def pytest_generate_tests(metafunc):
     if "config_filename" in metafunc.fixturenames:
-        # If config specified, use the --config directly
-        single_config = metafunc.config.getoption("--config")
-        if single_config:
-            metafunc.parametrize("config_filename",
-                                 [Path(single_config).resolve()])
-            return
-        # Otherwise, check --config-list-file
-        rel_path = metafunc.config.getoption("--config-list-file")
-        config_list_file = Path(rel_path).resolve()
-        config_dir = config_list_file.parent
-        with open(config_list_file, encoding="utf-8") as f:
-            configs = [
-                config_dir / line.strip() for line in f
-                if line.strip() and not line.startswith("#")
-            ]
-        metafunc.parametrize("config_filename", configs)
+
+        if metafunc.config.getoption("--config-list-file"):
+            rel_path = metafunc.config.getoption("--config-list-file")
+            config_list_file = Path(rel_path).resolve()
+            config_dir = config_list_file.parent
+            with open(config_list_file, encoding="utf-8") as f:
+                configs = [
+                    config_dir / line.strip() for line in f
+                    if line.strip() and not line.startswith("#")
+                ]
+            metafunc.parametrize("config_filename", configs)
+        else:
+            single_config = metafunc.config.getoption("--config")
+            config_path = Path(single_config).resolve()
+            metafunc.parametrize("config_filename", [config_path])
diff --git a/tests/e2e/singlecard/models/report_template.md b/tests/e2e/models/report_template.md
similarity index 100%
rename from tests/e2e/singlecard/models/report_template.md
rename to tests/e2e/models/report_template.md
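The option rename from `--report_output` to `--report-dir` also changes how the fixture reads it back: `pytestconfig.getoption("report_dir")` resolves the new flag because pytest derives an option's destination name from the flag, turning dashes into underscores. A minimal standalone illustration (not the project's actual conftest):

```python
# Minimal illustration: a flag registered as "--report-dir" can be read back
# either as getoption("report_dir") or getoption("--report-dir").
import pytest


def pytest_addoption(parser):
    parser.addoption("--report-dir",
                     action="store",
                     default="./benchmarks/accuracy",
                     help="Directory to store report files")


@pytest.fixture(scope="session")
def report_dir(pytestconfig):
    # Same value as pytestconfig.getoption("--report-dir")
    return pytestconfig.getoption("report_dir")
```

With the relocated paths, the CI step above now only passes `--config`; `--report-dir` falls back to its default of `./benchmarks/accuracy`.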
diff --git a/tests/e2e/singlecard/models/test_lm_eval_correctness.py b/tests/e2e/models/test_lm_eval_correctness.py
similarity index 94%
rename from tests/e2e/singlecard/models/test_lm_eval_correctness.py
rename to tests/e2e/models/test_lm_eval_correctness.py
index 3453a05712..567d3de70f 100644
--- a/tests/e2e/singlecard/models/test_lm_eval_correctness.py
+++ b/tests/e2e/models/test_lm_eval_correctness.py
@@ -48,7 +48,7 @@ def build_model_args(eval_config, tp_size):
     }
     for s in [
             "max_images", "gpu_memory_utilization", "enable_expert_parallel",
-            "tensor_parallel_size"
+            "tensor_parallel_size", "enforce_eager"
     ]:
         val = eval_config.get(s, None)
         if val is not None:
@@ -60,8 +60,7 @@ def build_model_args(eval_config, tp_size):
     return model_args
 
 
-def generate_report(tp_size, eval_config, report_data, report_output,
-                    env_config):
+def generate_report(tp_size, eval_config, report_data, report_dir, env_config):
     env = Environment(loader=FileSystemLoader(TEST_DIR))
     template = env.get_template("report_template.md")
     model_args = build_model_args(eval_config, tp_size)
@@ -85,12 +84,14 @@ def generate_report(tp_size, eval_config, report_data, report_output,
         num_fewshot=eval_config.get("num_fewshot", "N/A"),
         rows=report_data["rows"])
 
+    report_output = os.path.join(
+        report_dir, f"{os.path.basename(eval_config['model_name'])}.md")
     os.makedirs(os.path.dirname(report_output), exist_ok=True)
     with open(report_output, 'w', encoding='utf-8') as f:
         f.write(report_content)
 
 
-def test_lm_eval_correctness_param(config_filename, tp_size, report_output,
+def test_lm_eval_correctness_param(config_filename, tp_size, report_dir,
                                    env_config):
     eval_config = yaml.safe_load(config_filename.read_text(encoding="utf-8"))
     model_args = build_model_args(eval_config, tp_size)
@@ -143,6 +144,5 @@ def test_lm_eval_correctness_param(config_filename, tp_size, report_output,
                 metric_name.replace(',', '_stderr,') if metric_name == "acc,none"
                 else metric_name.replace(',', '_stderr,')]
         })
-    generate_report(tp_size, eval_config, report_data, report_output,
-                    env_config)
+    generate_report(tp_size, eval_config, report_data, report_dir, env_config)
     assert success
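With the report directory now a fixture default rather than a per-model CLI argument, generate_report derives the report filename from the config's `model_name`. A small, self-contained check of that derivation (values taken from this diff; the snippet itself is illustrative):

```python
# Report path derivation as introduced in generate_report(): the file name is
# the basename of the model id, placed under the configured report directory.
import os

report_dir = "./benchmarks/accuracy"          # default from --report-dir
model_name = "deepseek-ai/DeepSeek-V2-Lite"   # from DeepSeek-V2-Lite.yaml

report_output = os.path.join(report_dir, f"{os.path.basename(model_name)}.md")
print(report_output)  # ./benchmarks/accuracy/DeepSeek-V2-Lite.md
```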