From a3e4c924411c0716845dee17148f0f429c2fdc3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Stolarczuk?= Date: Thu, 5 Feb 2026 15:23:10 +0100 Subject: [PATCH] [Bench] Remove redundant info from results' jsons and fix clang version in metadata. --- devops/scripts/benchmarks/compare.py | 12 +++++------ devops/scripts/benchmarks/history.py | 3 +++ devops/scripts/benchmarks/html/scripts.js | 12 +++++------ devops/scripts/benchmarks/output_html.py | 13 +++++++++++- .../benchmarks/tests/test_integration.py | 7 ++----- devops/scripts/benchmarks/utils/platform.py | 21 ++++++++----------- devops/scripts/benchmarks/utils/result.py | 5 +---- devops/scripts/benchmarks/utils/utils.py | 16 ++++++++------ 8 files changed, 49 insertions(+), 40 deletions(-) diff --git a/devops/scripts/benchmarks/compare.py b/devops/scripts/benchmarks/compare.py index 712b3b9a4594a..6fcb8500df3ef 100644 --- a/devops/scripts/benchmarks/compare.py +++ b/devops/scripts/benchmarks/compare.py @@ -199,23 +199,23 @@ def halfway_round(value: int, n: int): regression = [] for test in target.results: - if test.name not in hist_avg: + if test.label not in hist_avg: continue # TODO compare command args which have an impact on performance # (i.e. ignore --save-name): if command results are incomparable, # skip the result. 
delta = 1 - ( - test.value / hist_avg[test.name].value + test.value / hist_avg[test.label].value if test.lower_is_better - else hist_avg[test.name].value / test.value + else hist_avg[test.label].value / test.value ) def perf_diff_entry() -> dict: res = asdict(test) res["delta"] = delta - res["hist_avg"] = hist_avg[test.name].value - res["avg_type"] = hist_avg[test.name].average_type + res["hist_avg"] = hist_avg[test.label].value + res["avg_type"] = hist_avg[test.label].average_type return res # Round to 2 decimal places: not going to fail a test on 0.001% over @@ -226,7 +226,7 @@ def perf_diff_entry() -> dict: regression.append(perf_diff_entry()) log.debug( - f"{test.name}: expect {hist_avg[test.name].value}, got {test.value}" + f"{test.label}: expect {hist_avg[test.label].value}, got {test.value}" ) return improvement, regression diff --git a/devops/scripts/benchmarks/history.py b/devops/scripts/benchmarks/history.py index 23090c8c6dae0..6a5707b3c3dc6 100644 --- a/devops/scripts/benchmarks/history.py +++ b/devops/scripts/benchmarks/history.py @@ -186,6 +186,9 @@ def git_info_from_path(path: Path) -> (str, str): # Get platform information platform_info = get_platform_info() + if platform_info.gpu_info is None: + log.warning("GPU information detection failed.") + platform_info.gpu_info = [] return BenchmarkRun( name=name, diff --git a/devops/scripts/benchmarks/html/scripts.js b/devops/scripts/benchmarks/html/scripts.js index a6a1159df9e8f..bc60b1acbe8dd 100644 --- a/devops/scripts/benchmarks/html/scripts.js +++ b/devops/scripts/benchmarks/html/scripts.js @@ -1,4 +1,4 @@ -// Copyright (C) 2024-2025 Intel Corporation +// Copyright (C) 2024-2026 Intel Corporation // Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions. 
// See LICENSE.TXT // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception @@ -1939,7 +1939,7 @@ function displaySelectedRunsPlatformInfo() { .map(runName => { const run = loadedBenchmarkRuns.find(r => r.name === runName); if (run && run.platform) { - return { name: runName, platform: run.platform }; + return { name: runName, platform: run.platform, date: run.date }; } return null; }) @@ -1959,7 +1959,7 @@ function displaySelectedRunsPlatformInfo() { const platform = runData.platform; const detailsContainer = document.createElement('div'); detailsContainer.className = 'platform-details-compact'; - detailsContainer.innerHTML = createPlatformDetailsHTML(platform); + detailsContainer.innerHTML = createPlatformDetailsHTML(platform, runData.date); runSection.appendChild(detailsContainer); container.appendChild(runSection); }); @@ -1967,9 +1967,9 @@ function displaySelectedRunsPlatformInfo() { // Platform Information Functions -function createPlatformDetailsHTML(platform) { - const formattedTimestamp = platform.timestamp ? - new Date(platform.timestamp).toLocaleString('en-US', { +function createPlatformDetailsHTML(platform, run_date) { + const formattedTimestamp = run_date ? + new Date(run_date).toLocaleString('en-US', { year: 'numeric', month: 'short', day: 'numeric', diff --git a/devops/scripts/benchmarks/output_html.py b/devops/scripts/benchmarks/output_html.py index cad51c94d30c8..e9b7505b5298f 100644 --- a/devops/scripts/benchmarks/output_html.py +++ b/devops/scripts/benchmarks/output_html.py @@ -1,4 +1,4 @@ -# Copyright (C) 2024-2025 Intel Corporation +# Copyright (C) 2024-2026 Intel Corporation # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions. 
# See LICENSE.TXT # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception @@ -135,6 +135,17 @@ def generate_html( # Sorted in reverse, such that runs are ordered from newest to oldest current_runs.sort(key=lambda run: run.date or datetime.min, reverse=True) + # The solution below requires the above sort to happen (the dashboard also uses the same order). + # + # Don't write "env" & "command" fields for all runs, only for the first 10 - otherwise they spam the output file. + # In the dashboard we only use the newest commands anyway. We actually pick the command for each "label" + # from one of the newest runs; the number 10 was picked arbitrarily, as most likely all available + # benchmarks were run within the last 10 runs (a few Baselines plus potentially a few custom runs). + for run in current_runs[10:]: + for result in run.results: + result.command = [] + result.env = {} + # Create the comprehensive output object output = BenchmarkOutput( runs=current_runs, diff --git a/devops/scripts/benchmarks/tests/test_integration.py b/devops/scripts/benchmarks/tests/test_integration.py index 8cc6cff2bd09f..5149007b16388 100644 --- a/devops/scripts/benchmarks/tests/test_integration.py +++ b/devops/scripts/benchmarks/tests/test_integration.py @@ -20,9 +20,7 @@ DataJson = namedtuple("DataJson", ["runs", "metadata", "tags", "names"]) DataJsonRun = namedtuple("DataJsonRun", ["name", "results"]) -DataJsonResult = namedtuple( - "DataJsonResult", ["name", "label", "suite", "value", "unit"] -) +DataJsonResult = namedtuple("DataJsonResult", ["label", "suite", "value", "unit"]) DataJsonMetatdata = namedtuple( "DataJsonMetatdata", [ @@ -112,7 +110,6 @@ def get_benchmark_output_data(self): name=run["name"], results=[ DataJsonResult( - name=r["name"], label=r["label"], suite=r["suite"], value=r["value"], @@ -167,7 +164,7 @@ def _checkGroup( self.assertEqual(groupMetadata.type, "group") def _checkResultsExist(self, caseName: str, out: DataJson): - self.assertIn(caseName, [r.name for r in out.runs[0].results]) + 
self.assertIn(caseName, [r.label for r in out.runs[0].results]) def _checkExistsInProcessOutput( self, proc: subprocess.CompletedProcess, expected: str diff --git a/devops/scripts/benchmarks/utils/platform.py b/devops/scripts/benchmarks/utils/platform.py index f134c5f9e34c1..bf432bb508436 100644 --- a/devops/scripts/benchmarks/utils/platform.py +++ b/devops/scripts/benchmarks/utils/platform.py @@ -1,4 +1,4 @@ -# Copyright (C) 2025 Intel Corporation +# Copyright (C) 2025-2026 Intel Corporation # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions. # See LICENSE.TXT # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception @@ -6,7 +6,6 @@ import platform import subprocess import os -from datetime import datetime from utils.result import Platform from options import options @@ -42,7 +41,11 @@ def get_project_clang_version(bin_dir): [clang_path, "--version"], capture_output=True, text=True ) if result.returncode == 0: - return result.stdout.split("\n")[0] + out = result.stdout.splitlines() + val = out[0] + if len(out) > 1: + val += " " + out[1] + return val except (FileNotFoundError, subprocess.CalledProcessError): pass @@ -98,7 +101,6 @@ def get_compute_runtime_version_detailed(): def get_gpu_info(): """Get GPU information including device list and driver version""" gpu_list = [] - gpu_count = 0 gpu_driver_version = "(unknown)" # Get GPU info from lspci @@ -116,11 +118,8 @@ def get_gpu_info(): if ": " in line: gpu_name = line.split(": ", 1)[1] gpu_list.append(gpu_name) - - gpu_count = len(gpu_list) except Exception: - gpu_list = ["Detection failed"] - gpu_count = 0 + gpu_list = None # Try to get GPU driver version try: @@ -146,7 +145,7 @@ def get_gpu_info(): except Exception: pass - return gpu_list, gpu_count, gpu_driver_version + return gpu_list, gpu_driver_version def get_platform_info() -> Platform: @@ -178,7 +177,7 @@ def get_platform_info() -> Platform: cpu_info = f"Detection failed: {str(e)}" # Get GPU information - gpu_list, 
gpu_count, gpu_driver_version = get_gpu_info() + gpu_list, gpu_driver_version = get_gpu_info() # Compiler versions - GCC from system, clang project-built gcc_version = "gcc (unknown)" @@ -230,12 +229,10 @@ def get_platform_info() -> Platform: level_zero_version = f"{adapter_name} | level-zero (version unknown)" return Platform( - timestamp=datetime.now().isoformat(), os=os_info, python=python_info, cpu_count=cpu_count, cpu_info=cpu_info, - gpu_count=gpu_count, gpu_info=gpu_list, gpu_driver_version=gpu_driver_version, gcc_version=gcc_version, diff --git a/devops/scripts/benchmarks/utils/result.py b/devops/scripts/benchmarks/utils/result.py index 36f2f67e8b44d..9f9f7e25f6aae 100644 --- a/devops/scripts/benchmarks/utils/result.py +++ b/devops/scripts/benchmarks/utils/result.py @@ -1,4 +1,4 @@ -# Copyright (C) 2024-2025 Intel Corporation +# Copyright (C) 2024-2026 Intel Corporation # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions. # See LICENSE.TXT # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception @@ -11,12 +11,10 @@ @dataclass_json @dataclass class Platform: - timestamp: str = "" os: str = "" python: str = "" cpu_count: int = 0 cpu_info: str = "" - gpu_count: int = 0 gpu_info: list[str] = field(default_factory=list) gpu_driver_version: str = "" # Add GPU driver version gcc_version: str = "" @@ -39,7 +37,6 @@ class Result: git_url: str = "" git_hash: str = "" # values below should not be set by the benchmark - name: str = "" lower_is_better: bool = True suite: str = "Unknown" diff --git a/devops/scripts/benchmarks/utils/utils.py b/devops/scripts/benchmarks/utils/utils.py index 2a2daf07646a3..cde1429b212e1 100644 --- a/devops/scripts/benchmarks/utils/utils.py +++ b/devops/scripts/benchmarks/utils/utils.py @@ -80,13 +80,17 @@ def run( # order is important, we want provided sycl rt libraries to be first if add_sycl: sycl_bin_path = os.path.join(options.sycl, "bin") - env_vars["PATH"] = os.pathsep.join( - filter(None, 
[sycl_bin_path, env_vars.get("PATH", "")]) - ) sycl_lib_path = os.path.join(options.sycl, "lib") - env_vars["LD_LIBRARY_PATH"] = os.pathsep.join( - filter(None, [sycl_lib_path, env_vars.get("LD_LIBRARY_PATH", "")]) - ) + + # add them only if not already added + if sycl_bin_path not in env_vars.get("PATH", ""): + env_vars["PATH"] = os.pathsep.join( + filter(None, [sycl_bin_path, env_vars.get("PATH", "")]) + ) + if sycl_lib_path not in env_vars.get("LD_LIBRARY_PATH", ""): + env_vars["LD_LIBRARY_PATH"] = os.pathsep.join( + filter(None, [sycl_lib_path, env_vars.get("LD_LIBRARY_PATH", "")]) + ) command_str = " ".join(command) env_str = " ".join(f"{key}={value}" for key, value in env_vars.items())