From 2fd651d539faeef97e776f33b3db3aad3cd1b90a Mon Sep 17 00:00:00 2001 From: Cameron Aavik Date: Fri, 26 Sep 2025 10:42:51 +1000 Subject: [PATCH 1/6] Enable strict type checking + python code cleanup --- .vscode/settings.json | 37 +++-- requirements.txt | 1 - scripts/benchmarks_ci.py | 24 ++- scripts/benchmarks_local.py | 33 ++-- scripts/benchmarks_monthly.py | 10 +- scripts/benchmarks_monthly_upload.py | 23 +-- scripts/build_runtime_payload.py | 33 +++- scripts/channel_map.py | 8 +- scripts/ci_setup.py | 33 ++-- scripts/dotnet.py | 184 ++++++++++------------- scripts/micro_benchmarks.py | 42 +++--- scripts/performance/common.py | 80 +++++++--- scripts/performance/logger.py | 19 +-- scripts/performance/tracer.py | 83 ++++++---- scripts/run_performance_job.py | 44 +++--- scripts/send_to_helix.py | 37 ++--- scripts/upload.py | 53 ++++--- src/scenarios/shared/codefixes.py | 9 +- src/scenarios/shared/crossgen.py | 4 +- src/scenarios/shared/mauisharedpython.py | 2 +- src/scenarios/shared/precommands.py | 14 +- src/scenarios/shared/testtraits.py | 3 +- src/scenarios/shared/versionmanager.py | 5 +- 23 files changed, 403 insertions(+), 378 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index af2389ffc02..537ce8cddcf 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,15 +1,30 @@ { "files.exclude": { - "**/__pycache__/": true, - "**/BenchmarkDotNet.Artifacts/": true, - "**/.vs/": true, - "**/artifacts/": true, - "**/bin/": true, - "**/logs/": true, - "**/obj/": true, - "**/packages/": true, - "/tools/": true, + "**/__pycache__/**": true, + "**/.vs/**": true, + "**/bin/**": true, + "**/obj/**": true, + "**/packages/**": true, + "**/.venv/**": true, + "**/.dotnet/**": true, }, - "python.analysis.typeCheckingMode": "standard", - "python.analysis.extraPaths": ["scripts"] + "files.readonlyInclude": { + "/eng/common/**": true, + }, + "search.exclude": { + "**/artifacts/**": true, + "**/weblarge3.0/**": true, + }, + "python.analysis.typeCheckingMode": "strict", + "python.analysis.diagnosticMode": "workspace", + "python.analysis.exclude": [ + "**/__pycache__/**", + "**/artifacts/**", + "**/.venv/**", + "**/eng/common/**", + "**/weblarge3.0/**" + ], + "python.analysis.extraPaths": [ + "./scripts" + ] } \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index e6622cc82d5..efc66dc1db9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ azure.storage.blob==12.13.0 azure.storage.queue==12.4.0 azure.identity==1.16.1 -dataclasses==0.8 gitpython<=3.1.41 urllib3==1.26.19 opentelemetry-api==1.23.0 diff --git a/scripts/benchmarks_ci.py b/scripts/benchmarks_ci.py index ea344a7c2b2..af1891da96a 100755 --- a/scripts/benchmarks_ci.py +++ b/scripts/benchmarks_ci.py @@ -21,14 +21,13 @@ ''' from argparse import ArgumentParser, ArgumentTypeError -from datetime import datetime import json from logging import getLogger import os import shutil import sys -from typing import Any, List, Optional +from typing import Any, Optional from performance.common import get_repo_root_path, validate_supported_runtime, get_artifacts_directory, helixuploadroot from performance.logger import setup_loggers @@ -44,11 +43,11 @@ setup_tracing() tracer = get_tracer() -@tracer.start_as_current_span(name="benchmarks_ci_init_tools") # type: ignore +@tracer.start_as_current_span(name="benchmarks_ci_init_tools") def init_tools( architecture: str, - dotnet_versions: List[str], - target_framework_monikers: List[str], + dotnet_versions: list[str], + 
target_framework_monikers: list[str], verbose: bool, azure_feed_url: Optional[str] = None, internal_build_key: Optional[str] = None) -> None: @@ -78,9 +77,6 @@ def init_tools( def add_arguments(parser: ArgumentParser) -> ArgumentParser: '''Adds new arguments to the specified ArgumentParser object.''' - if not isinstance(parser, ArgumentParser): - raise TypeError('Invalid parser.') - # Download DotNet Cli dotnet.add_arguments(parser) @@ -199,7 +195,7 @@ def __is_valid_dotnet_path(dp: str) -> str: return parser -def __process_arguments(args: List[str]): +def __process_arguments(args: list[str]): parser = ArgumentParser( description='Tool to run .NET micro benchmarks', allow_abbrev=False, @@ -209,8 +205,8 @@ def __process_arguments(args: List[str]): add_arguments(parser) return parser.parse_args(args) -@tracer.start_as_current_span("benchmarks_ci_main") # type: ignore -def main(argv: List[str]): +@tracer.start_as_current_span("benchmarks_ci_main") +def main(argv: list[str]): validate_supported_runtime() args = __process_arguments(argv) verbose = not args.quiet @@ -223,9 +219,7 @@ def main(argv: List[str]): if not args.frameworks: raise Exception("Framework version (-f) must be specified.") - target_framework_monikers = dotnet \ - .FrameworkAction \ - .get_target_framework_monikers(args.frameworks) + target_framework_monikers = dotnet.get_target_framework_monikers(args.frameworks) # Acquire necessary tools (dotnet) if not args.dotnet_path: init_tools( @@ -303,7 +297,7 @@ def main(argv: List[str]): # Create a combined JSON file that contains all the reports combined_file_prefix = "" if args.partition is None else f"Partition{args.partition}-" with open(os.path.join(helix_upload_root, f"{combined_file_prefix}combined-perf-lab-report.json"), "w", encoding="utf8") as all_reports_file: - all_reports: List[Any] = [] + all_reports: list[Any] = [] for file in glob(reports_globpath, recursive=True): with open(file, 'r', encoding="utf8") as report_file: try: diff --git a/scripts/benchmarks_local.py b/scripts/benchmarks_local.py index 28452606345..747467762b9 100644 --- a/scripts/benchmarks_local.py +++ b/scripts/benchmarks_local.py @@ -3,7 +3,7 @@ import platform import shutil import sys -from typing import List, Optional +from typing import Optional import xml.etree.ElementTree as xmlTree from argparse import ArgumentParser, ArgumentTypeError, Namespace from datetime import datetime @@ -72,10 +72,10 @@ def enum_name_to_enum(enum_type: EnumMeta, enum_name: str): except KeyError as exc: raise ArgumentTypeError(f"Invalid run type name {enum_name}.") from exc -def enum_name_list_to_enum_list(enum_type: EnumMeta, enum_name_list: List[str]): +def enum_name_list_to_enum_list(enum_type: EnumMeta, enum_name_list: list[str]): return [enum_name_to_enum(enum_type, enum_name) for enum_name in enum_name_list] -def check_for_runtype_specified(parsed_args: Namespace, run_types_to_check: List[RunType]) -> bool: +def check_for_runtype_specified(parsed_args: Namespace, run_types_to_check: list[RunType]) -> bool: for run_type in run_types_to_check: if run_type.name in parsed_args.run_type_names: return True @@ -93,7 +93,7 @@ def copy_directory_contents(src_dir: str, dest_dir: str): shutil.copy2(os.path.join(src_dirpath, src_filename), dest_dirpath) # Builds libs and corerun by default -def build_runtime_dependency(parsed_args: Namespace, repo_path: str, subset: str = "clr+libs", configuration: str = "Release", os_override = "", arch_override = "", additional_args: Optional[List[str]] = None): +def 
build_runtime_dependency(parsed_args: Namespace, repo_path: str, subset: str = "clr+libs", configuration: str = "Release", os_override: str = "", arch_override: str = "", additional_args: Optional[list[str]] = None): if additional_args is None: additional_args = [] @@ -117,13 +117,13 @@ def build_runtime_dependency(parsed_args: Namespace, repo_path: str, subset: str ] + additional_args RunCommand(build_libs_and_corerun_command, verbose=True).run(os.path.join(repo_path, "eng")) -def run_runtime_dotnet(repo_path: str, args: Optional[List[str]] = None): +def run_runtime_dotnet(repo_path: str, args: Optional[list[str]] = None): if args is None: args = [] dotnet_command = ["./dotnet.sh"] + args RunCommand(dotnet_command, verbose=True).run(repo_path) -def generate_layout(parsed_args: Namespace, repo_path: str, additional_args: Optional[List[str]] = None): +def generate_layout(parsed_args: Namespace, repo_path: str, additional_args: Optional[list[str]] = None): if additional_args is None: additional_args = [] @@ -264,10 +264,10 @@ def generate_all_runtype_dependencies(parsed_args: Namespace, repo_path: str, co getLogger().info("Finished generating dependencies for %s run types in %s and stored in %s.", ' '.join(map(str, parsed_args.run_type_names)), repo_path, parsed_args.artifact_storage_path) -def generate_combined_benchmark_ci_args(parsed_args: Namespace, specific_run_type: RunType, all_commits: List[str]) -> List[str]: +def generate_combined_benchmark_ci_args(parsed_args: Namespace, specific_run_type: RunType, all_commits: list[str]) -> list[str]: getLogger().info("Generating benchmark_ci.py arguments for %s run type using artifacts in %s.", specific_run_type.name, parsed_args.artifact_storage_path) bdn_args_unescaped: list[str] = [] - benchmark_ci_args = [ + benchmark_ci_args: list[str] = [ '--architecture', parsed_args.architecture, '--frameworks', parsed_args.framework, '--dotnet-path', parsed_args.dotnet_dir_path, @@ -330,10 +330,10 @@ def generate_combined_benchmark_ci_args(parsed_args: Namespace, specific_run_typ getLogger().info("Finished generating benchmark_ci.py arguments for %s run type using artifacts in %s.", specific_run_type.name, parsed_args.artifact_storage_path) return benchmark_ci_args -def generate_single_benchmark_ci_args(parsed_args: Namespace, specific_run_type: RunType, commit: str) -> List[str]: +def generate_single_benchmark_ci_args(parsed_args: Namespace, specific_run_type: RunType, commit: str) -> list[str]: getLogger().info("Generating benchmark_ci.py arguments for %s run type using artifacts in %s.", specific_run_type.name, parsed_args.artifact_storage_path) bdn_args_unescaped: list[str] = [] - benchmark_ci_args = [ + benchmark_ci_args: list[str] = [ '--architecture', parsed_args.architecture, '--frameworks', parsed_args.framework, '--csproj', parsed_args.csproj, @@ -445,7 +445,7 @@ def generate_artifacts_for_commit(parsed_args: Namespace, repo_url: str, repo_di getLogger().info("Running for %s at %s.", repo_path, commit) if not os.path.exists(repo_path): - repo = Repo.clone_from(repo_url, repo_path) # type: ignore 'Type of "clone_from" is partially unknown', we know it is a method and returns a Repo + repo = Repo.clone_from(repo_url, repo_path) repo.git.checkout(commit, '-f') repo.git.show('HEAD') else: @@ -458,7 +458,7 @@ def generate_artifacts_for_commit(parsed_args: Namespace, repo_url: str, repo_di generate_all_runtype_dependencies(parsed_args, repo_path, commit, (is_local and not parsed_args.skip_local_rebuild) or parsed_args.rebuild_artifacts) # Run tests 
on the local machine -def run_benchmarks(parsed_args: Namespace, commits: List[str]) -> None: +def run_benchmarks(parsed_args: Namespace, commits: list[str]) -> None: # Generate the correct benchmarks_ci.py arguments for the run type for run_type_meta in enum_name_list_to_enum_list(RunType, parsed_args.run_type_names): # Run the benchmarks_ci.py test and save results @@ -505,7 +505,7 @@ def check_references_exist_and_add_branch_commits(repo_url: str, references: lis repo_combined_path = os.path.join(repo_storage_path, repo_dir) if not os.path.exists(repo_combined_path): getLogger().debug("Cloning %s to %s.", repo_url, repo_combined_path) - repo = Repo.clone_from(repo_url, repo_combined_path) # type: ignore 'Type of "clone_from" is partially unknown', we know it is a method and returns a Repo + repo = Repo.clone_from(repo_url, repo_combined_path) else: repo = Repo(repo_combined_path) repo.remotes.origin.fetch() @@ -564,11 +564,12 @@ def get_default_os(): else: raise NotImplementedError(f"Unsupported operating system: {system}.") -def __main(args: List[str]): +def __main(args: list[str]): # Define the ArgumentParser parser = ArgumentParser(description='Run local benchmarks for the Performance repo.', conflict_handler='resolve') add_arguments(parser) parsed_args = parser.parse_args(args) + assert isinstance(parsed_args.artifact_storage_path, str) parsed_args.dotnet_dir_path = os.path.join(parsed_args.artifact_storage_path, "dotnet") setup_loggers(verbose=parsed_args.verbose) @@ -587,9 +588,9 @@ def __main(args: List[str]): # If list cached builds is specified, list the cached builds and exit if parsed_args.list_cached_builds: - for folder in os.listdir(parsed_args.artifact_storage_path): # type: ignore warning about folder type being unknown, we know it is a string + for folder in os.listdir(parsed_args.artifact_storage_path): if any(run_type.name in folder for run_type in RunType): - getLogger().info(folder) # type: ignore We know folder is a string + getLogger().info(folder) return # Check to make sure we have something specified to test diff --git a/scripts/benchmarks_monthly.py b/scripts/benchmarks_monthly.py index a4d17477292..67a0636a6dd 100644 --- a/scripts/benchmarks_monthly.py +++ b/scripts/benchmarks_monthly.py @@ -6,7 +6,6 @@ monthly manual performance runs. ''' -from typing import Dict, List from performance.common import get_machine_architecture from performance.logger import setup_loggers from argparse import ArgumentParser @@ -36,7 +35,7 @@ 'net6.0': { 'tfm': 'net6.0' } } -def get_version_from_name(name: str) -> Dict[str, str]: +def get_version_from_name(name: str) -> dict[str, str]: if name in VERSIONS: return VERSIONS[name] @@ -45,9 +44,6 @@ def get_version_from_name(name: str) -> Dict[str, str]: def add_arguments(parser: ArgumentParser) -> ArgumentParser: # Adds new arguments to the specified ArgumentParser object. 
- if not isinstance(parser, ArgumentParser): - raise TypeError('Invalid parser.') - parser.add_argument( 'versions', nargs='+', @@ -114,7 +110,7 @@ def add_arguments(parser: ArgumentParser) -> ArgumentParser: return parser -def __process_arguments(args: List[str]): +def __process_arguments(args: list[str]): parser = ArgumentParser( description='Tool to execute the monthly manual micro benchmark performance runs', allow_abbrev=False @@ -123,7 +119,7 @@ def __process_arguments(args: List[str]): add_arguments(parser) return parser.parse_args(args) -def __main(argv: List[str]): +def __main(argv: list[str]): setup_loggers(verbose=True) args = __process_arguments(argv) diff --git a/scripts/benchmarks_monthly_upload.py b/scripts/benchmarks_monthly_upload.py index dc3ff65af28..4341b80e174 100644 --- a/scripts/benchmarks_monthly_upload.py +++ b/scripts/benchmarks_monthly_upload.py @@ -1,5 +1,5 @@ from json import loads, dumps -from typing import Optional +from typing import Any, Optional from urllib.request import urlopen, Request from urllib.parse import urlencode from urllib.error import HTTPError @@ -34,9 +34,9 @@ def get_token() -> str: if token: try: with urlopen(Request(authDetailsEndpoint, - headers = { "X-ZUMO-AUTH": token })) as response: + headers = { "X-ZUMO-AUTH": token })): print("Using cached credentials.") - except HTTPError as error: + except HTTPError: token = None if not token: @@ -62,7 +62,7 @@ def authenticate() -> str: print(devicecodeResponse["message"]) - authBody2 = { + authBody2: dict[str, Any] = { "tenant": tenantId, "grant_type": "urn:ietf:params:oauth:grant-type:device_code", "client_id": appId, @@ -71,29 +71,30 @@ def authenticate() -> str: authBody2Encoded = urlencode(authBody2).encode() - authStatus = "waiting" print("waiting", end="", flush=True) - while (authStatus == "waiting"): + tokenResponse = None + while True: # Try to get the access token. if we encounter an error check the reason. # If the reason is we are waiting then sleep for some time. # If the reason is the user has declined or we timed out then quit. 
try: with urlopen(Request(f"{aadUrl}/oauth2/v2.0/token", data = authBody2Encoded)) as response: tokenResponse = loads(response.read().decode('utf-8')) - authStatus = "done" - except Exception as ex: + break + except HTTPError as ex: reason = loads(ex.read().decode('utf-8'))["error"] if reason == "authorization_pending": print(".", end="", flush=True) time.sleep(5) elif reason == "authorization_declined": - authStatus = "failed" + break elif reason == "expired_token": - authStatus = "failed" + break print() - if authStatus == "failed": raise "Authentication failed" + if tokenResponse is None: + raise Exception("Authentication failed") idToken = tokenResponse["id_token"] diff --git a/scripts/build_runtime_payload.py b/scripts/build_runtime_payload.py index 2bc3a25b814..279bc83b3df 100644 --- a/scripts/build_runtime_payload.py +++ b/scripts/build_runtime_payload.py @@ -15,6 +15,7 @@ __all__ = [ "extract_archive_or_copy", "build_coreroot_payload", + "build_coreroot_payload_simple", "build_mono_payload", "build_monoaot_payload", "build_wasm_payload", @@ -64,7 +65,7 @@ def extract_archive_or_copy(archive_path_or_dir: str, dest_dir: str, prefix: Opt if prefix and not prefix.endswith("/") and "/" in prefix: prefix_folder = prefix[:prefix.rfind("/") + 1] - getLogger().debug( + getLogger().info( "extract_archive_or_copy: source=%s dest=%s prefix=%s (folder=%s)", archive_path_or_dir, dest_dir, @@ -118,7 +119,7 @@ def extract_archive_or_copy(archive_path_or_dir: str, dest_dir: str, prefix: Opt os.makedirs(os.path.dirname(output_path), exist_ok=True) source = tar_ref.extractfile(member) if source is not None: - with source and open(output_path, "wb") as target: + with source, open(output_path, "wb") as target: target.write(source.read()) else: raise Exception("Unsupported archive format") @@ -204,6 +205,34 @@ def build_coreroot_payload( ignore=shutil.ignore_patterns("*.pdb"), # Exclude PDBs (not needed in payloads) ) +def build_coreroot_payload_simple( + core_root_dest: str, + tfm: str, + os_group: str, + architecture: str, + coreclr_archive_or_dir: str, + libraries_config: str = "Release" +): + """Generate a CoreCLR `Core_Root` manually. + + This is a simplified version of `build_coreroot_payload` that does not require a clone of the runtime + repository. If the runtime repository is available, build_coreroot_payload should be used instead to ensure + accuracy in case the layout generation code changes. + + This Core_Root can't be used to run the tests in the dotnet/runtime repository as it does not create the targeting + pack, however the targeting pack is not needed to run the performance tests. 
+ """ + libraries_path_segment = f"{tfm}-{os_group}-{libraries_config}-{architecture}" + extract_archive_or_copy(coreclr_archive_or_dir, core_root_dest, prefix=f"runtime/{libraries_path_segment}/") + extract_archive_or_copy(coreclr_archive_or_dir, core_root_dest, prefix=f"native/{libraries_path_segment}/") + extract_archive_or_copy(coreclr_archive_or_dir, core_root_dest, prefix=f"coreclr/{os_group}.{architecture}.{libraries_config}/") + + # chmod +x corerun + if os_group != "windows": + corerun_executable = os.path.join(core_root_dest, "corerun") + if os.path.exists(corerun_executable): + os.chmod(corerun_executable, 0o755) + def build_mono_payload( mono_payload_dst: str, os_group: str, diff --git a/scripts/channel_map.py b/scripts/channel_map.py index 04acdc026b2..10fc98a170f 100644 --- a/scripts/channel_map.py +++ b/scripts/channel_map.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Set +from typing import Optional class ChannelMap(): channel_map = { @@ -174,12 +174,12 @@ class ChannelMap(): } } @staticmethod - def get_supported_channels() -> List[str]: + def get_supported_channels() -> list[str]: '''List of supported channels.''' return list(ChannelMap.channel_map.keys()) @staticmethod - def get_supported_frameworks() -> Set[str]: + def get_supported_frameworks() -> set[str]: '''List of supported frameworks''' frameworks = [ChannelMap.channel_map[channel]['tfm'] for channel in ChannelMap.channel_map] return set(frameworks) @@ -192,7 +192,7 @@ def get_branch(channel: str) -> str: raise Exception('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels())) @staticmethod - def get_target_framework_monikers(channels: List[str]) -> List[str]: + def get_target_framework_monikers(channels: list[str]) -> list[str]: ''' Translates channel names to Target Framework Monikers (TFMs). 
''' diff --git a/scripts/ci_setup.py b/scripts/ci_setup.py index 462600faffd..18bea0d5b4e 100644 --- a/scripts/ci_setup.py +++ b/scripts/ci_setup.py @@ -5,10 +5,10 @@ import os import sys -import datetime +from datetime import datetime, timezone from subprocess import check_output -from typing import Any, Optional, List +from typing import Optional from performance.common import get_machine_architecture, get_repo_root_path, set_environment_variable from performance.common import get_tools_directory @@ -22,7 +22,7 @@ def init_tools( architecture: str, - dotnet_versions: List[str], + dotnet_versions: list[str], channel: str, verbose: bool, install_dir: Optional[str]=None) -> None: @@ -44,9 +44,6 @@ def init_tools( def add_arguments(parser: ArgumentParser) -> ArgumentParser: '''Adds new arguments to the specified ArgumentParser object.''' - if not isinstance(parser, ArgumentParser): - raise TypeError('Invalid parser.') - # Download DotNet Cli dotnet.add_arguments(parser) @@ -270,7 +267,7 @@ def __is_valid_dotnet_path(dp: str) -> str: return parser -def __process_arguments(args: List[str]): +def __process_arguments(args: list[str]): parser = ArgumentParser( description='Tool to generate a machine setup script', allow_abbrev=False, @@ -290,9 +287,9 @@ def __init__( repository: Optional[str] = None, architecture: str = get_machine_architecture(), dotnet_path: Optional[str] = None, - dotnet_versions: List[str] = [], + dotnet_versions: list[str] = [], install_dir: Optional[str] = None, - build_configs: List[str] = [], + build_configs: list[str] = [], pgo_status: Optional[str] = None, get_perf_hash: bool = False, perf_hash: str = 'testSha', @@ -307,7 +304,7 @@ def __init__( locale: str = 'en-US', maui_version: str = '', affinity: Optional[str] = None, - run_env_vars: Optional[List[str]] = None, + run_env_vars: Optional[list[str]] = None, target_windows: bool = True, physical_promotion_status: Optional[str] = None, r2r_status: Optional[str] = None, @@ -341,7 +338,7 @@ def __init__( self.r2r_status = r2r_status self.experiment_name = experiment_name -def main(args: Any): +def main(args: CiSetupArgs): verbose = not args.quiet setup_loggers(verbose=verbose) @@ -396,7 +393,7 @@ def main(args: Any): # When running on internal repos, the repository comes to us incorrectly # (ie https://github.com/dotnet-coreclr). Replace dashes with slashes in that case. 
- repo_url = None if use_core_sdk else args.repository.replace('-','/') + repo_url = None if args.repository is None else args.repository.replace('-','/') variable_format = 'set "%s=%s"\n' if args.target_windows else 'export %s="%s"\n' path_variable = 'set PATH=%s;%%PATH%%\n' if args.target_windows else 'export PATH=%s:$PATH\n' @@ -427,7 +424,7 @@ def main(args: Any): with push_dir(get_repo_root_path()): output = check_output(['git', 'rev-parse', 'HEAD']) - decoded_lines: List[str] = [] + decoded_lines: list[str] = [] for line in output.splitlines(): decoded_lines = decoded_lines + [line.decode('utf-8')] @@ -446,15 +443,17 @@ def main(args: Any): os.makedirs(dir_path, exist_ok=True) if not framework.startswith('net4'): - target_framework_moniker = dotnet.FrameworkAction.get_target_framework_moniker(framework) + target_framework_moniker = dotnet.get_target_framework_moniker(framework) dotnet_version = dotnet.get_dotnet_version_precise(target_framework_moniker, args.cli) if args.dotnet_versions == [] else args.dotnet_versions[0] commit_sha = dotnet.get_dotnet_sdk(target_framework_moniker, args.cli) if use_core_sdk else args.commit_sha + assert commit_sha is not None # verified at start of main + if args.local_build: - source_timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ') + source_timestamp = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ') elif(args.commit_time is not None): try: - parsed_timestamp = datetime.datetime.strptime(args.commit_time, '%Y-%m-%d %H:%M:%S %z').astimezone(datetime.timezone.utc) + parsed_timestamp = datetime.strptime(args.commit_time, '%Y-%m-%d %H:%M:%S %z').astimezone(timezone.utc) source_timestamp = parsed_timestamp.strftime('%Y-%m-%dT%H:%M:%SZ') except ValueError: getLogger().warning('Invalid commit_time format. Please use YYYY-MM-DD HH:MM:SS +/-HHMM. Attempting to get commit time from api.github.com.') @@ -513,7 +512,7 @@ def main(args: Any): # The '_Framework' is needed for specifying frameworks in proj files and for building tools later in the pipeline set_environment_variable('PERFLAB_Framework', framework) -def __main(argv: List[str]): +def __main(argv: list[str]): validate_supported_runtime() args = __process_arguments(argv) main(CiSetupArgs(**vars(args))) diff --git a/scripts/dotnet.py b/scripts/dotnet.py index 176218f5eed..d6671a3de0c 100755 --- a/scripts/dotnet.py +++ b/scripts/dotnet.py @@ -4,9 +4,9 @@ Contains the functionality around DotNet Cli. 
""" -import ssl +import re import datetime -from argparse import Action, ArgumentParser, ArgumentTypeError +from argparse import ArgumentParser, ArgumentTypeError from glob import iglob from logging import getLogger from os import chmod, environ, listdir, makedirs, path, pathsep, system @@ -15,7 +15,7 @@ from stat import S_IRWXU from subprocess import CalledProcessError, check_output from sys import argv, platform -from typing import Any, List, NamedTuple, Optional, Tuple +from typing import Any, NamedTuple, Optional from urllib.error import URLError from urllib.parse import urlparse from urllib.request import urlopen @@ -34,7 +34,7 @@ setup_tracing() tracer = get_tracer() -@tracer.start_as_current_span(name="info") # type: ignore +@tracer.start_as_current_span(name="info") def info(verbose: bool) -> None: """ Executes `dotnet --info` in order to get the .NET Core information from the @@ -43,8 +43,8 @@ def info(verbose: bool) -> None: cmdline = ['dotnet', '--info'] RunCommand(cmdline, verbose=verbose).run() -@tracer.start_as_current_span(name="exec") # type: ignore -def exec(asm_path: str, success_exit_codes: List[int], verbose: bool, *args: str) -> int: +@tracer.start_as_current_span(name="exec") +def exec(asm_path: str, success_exit_codes: list[int], verbose: bool, *args: str) -> int: """ Executes `dotnet exec` which can be used to execute assemblies """ @@ -66,68 +66,49 @@ def __log_script_header(message: str): CSharpProjFile = NamedTuple('CSharpProjFile', file_name=str, working_directory=str) -class FrameworkAction(Action): +@tracer.start_as_current_span("get_target_framework_moniker") +def get_target_framework_moniker(framework: str) -> str: ''' - Used by the ArgumentParser to represent the information needed to parse the - supported .NET frameworks argument from the command line. + Translates framework name to target framework moniker (TFM) + To run NativeAOT benchmarks we need to run the host BDN process as latest + .NET the host process will build and run AOT benchmarks ''' + if framework == 'nativeaot6.0': + return 'net6.0' + if framework == 'nativeaot7.0': + return 'net7.0' + if framework == 'nativeaot8.0': + return 'net8.0' + if framework == 'nativeaot9.0': + return 'net9.0' + if framework == 'nativeaot10.0': + return 'net10.0' + else: + return framework - def __call__(self, parser, namespace, values, option_string=None): - if values: - setattr(namespace, self.dest, list(set(values))) - - @staticmethod - @tracer.start_as_current_span("frameworkaction_get_target_framework_moniker") # type: ignore - def get_target_framework_moniker(framework: str) -> str: - ''' - Translates framework name to target framework moniker (TFM) - To run NativeAOT benchmarks we need to run the host BDN process as latest - .NET the host process will build and run AOT benchmarks - ''' - if framework == 'nativeaot6.0': - return 'net6.0' - if framework == 'nativeaot7.0': - return 'net7.0' - if framework == 'nativeaot8.0': - return 'net8.0' - if framework == 'nativeaot9.0': - return 'net9.0' - if framework == 'nativeaot10.0': - return 'net10.0' - else: - return framework - - @staticmethod - @tracer.start_as_current_span("frameworkaction_get_supported_frameworks") # type: ignore - def get_target_framework_monikers(frameworks: List[str]) -> List[str]: - ''' - Translates framework names to target framework monikers (TFM) - Required to run AOT benchmarks where the host process must be .NET - , not NativeAOT. 
- ''' - monikers = [ - FrameworkAction.get_target_framework_moniker(framework) - for framework in frameworks - ] - - # ['net6.0', 'nativeaot6.0'] should become ['net6.0'] - return list(set(monikers)) - -class VersionsAction(Action): +@tracer.start_as_current_span("get_supported_frameworks") +def get_target_framework_monikers(frameworks: list[str]) -> list[str]: ''' - Argument parser helper class used to validates the dotnet-versions input. + Translates framework names to target framework monikers (TFM) + Required to run AOT benchmarks where the host process must be .NET + , not NativeAOT. ''' + monikers = [ + get_target_framework_moniker(framework) + for framework in frameworks + ] + + # ['net6.0', 'nativeaot6.0'] should become ['net6.0'] + return list(set(monikers)) - def __call__(self, parser, namespace, values, option_string=None): - if values: - for version in values: - if not search(r'^\d+\.\d+\.\d+', version): - raise ArgumentTypeError( - 'Version "{}" is in the wrong format'.format(version)) - setattr(namespace, self.dest, values) +_VERSION_RE = re.compile(r"\d+\.\d+\.\d+$") +def version_type(value: str) -> str: + if not _VERSION_RE.fullmatch(value): + raise ArgumentTypeError(f'Version "{value}" is in the wrong format') + return value -class CompilationAction(Action): +class CompilationAction: ''' Tiered: (Default) @@ -160,14 +141,8 @@ class CompilationAction(Action): FULLY_JITTED_NO_TIERING = 'FullyJittedNoTiering' MIN_OPT = 'MinOpt' - def __call__(self, parser, namespace, values, option_string=None): - if values: - if values not in CompilationAction.modes(): - raise ArgumentTypeError('Unknown mode: {}'.format(values)) - setattr(namespace, self.dest, values) - @staticmethod - @tracer.start_as_current_span("compilationaction_set_mode") # type: ignore + @tracer.start_as_current_span("compilationaction_set_mode") def __set_mode(mode: str) -> None: # Remove potentially set environments. 
COMPLUS_ENVIRONMENTS = [ @@ -196,7 +171,7 @@ def __set_mode(mode: str) -> None: raise ArgumentTypeError('Unknown mode: {}'.format(mode)) @staticmethod - @tracer.start_as_current_span("compilationaction_validate") # type: ignore + @tracer.start_as_current_span("compilationaction_validate") def validate(usr_mode: str) -> str: '''Validate user input.''' requested_mode = None @@ -210,8 +185,8 @@ def validate(usr_mode: str) -> str: return requested_mode @staticmethod - @tracer.start_as_current_span("compilationaction_modes") # type: ignore - def modes() -> List[str]: + @tracer.start_as_current_span("compilationaction_modes") + def modes() -> list[str]: '''Available .NET Performance modes.''' return [ CompilationAction.DEFAULT, @@ -222,13 +197,13 @@ def modes() -> List[str]: ] @staticmethod - @tracer.start_as_current_span("compilationaction_noenv") # type: ignore + @tracer.start_as_current_span("compilationaction_noenv") def noenv() -> str: '''Default .NET performance mode.''' return CompilationAction.modes()[0] # No environment set @staticmethod - @tracer.start_as_current_span("compilationaction_help_text") # type: ignore + @tracer.start_as_current_span("compilationaction_help_text") def help_text() -> str: '''Gets the help string describing the different compilation modes.''' return '''Different compilation modes that can be set to change the @@ -302,12 +277,12 @@ def bin_path(self) -> str: '''Gets the directory in which the built binaries will be placed.''' return self.__bin_directory - @tracer.start_as_current_span("csharpproject_restore") # type: ignore + @tracer.start_as_current_span("csharpproject_restore") def restore(self, packages_path: str, verbose: bool, runtime_identifier: Optional[str] = None, - args: Optional[List[str]] = None) -> None: + args: Optional[list[str]] = None) -> None: ''' Calls dotnet to restore the dependencies and tools of the specified project. @@ -337,15 +312,15 @@ def restore(self, RunCommand(cmdline, verbose=verbose, retry=1).run( self.working_directory) - @tracer.start_as_current_span("csharpproject_build") # type: ignore + @tracer.start_as_current_span("csharpproject_build") def build(self, configuration: str, verbose: bool, packages_path: str, - target_framework_monikers: Optional[List[str]] = None, + target_framework_monikers: Optional[list[str]] = None, output_to_bindir: bool = False, runtime_identifier: Optional[str] = None, - args: Optional[List[str]] = None) -> None: + args: Optional[list[str]] = None) -> None: '''Calls dotnet to build the specified project.''' if not target_framework_monikers: # Build all supported frameworks. 
cmdline = [ @@ -395,7 +370,7 @@ def build(self, RunCommand(cmdline, verbose=verbose).run( self.working_directory) @staticmethod - @tracer.start_as_current_span("csharpproject_new") # type: ignore + @tracer.start_as_current_span("csharpproject_new") def new(template: str, output_dir: str, bin_dir: str, @@ -406,7 +381,7 @@ def new(template: str, language: Optional[str] = None, no_https: bool = False, no_restore: bool = True, - extra_args: Optional[List[str]] = None + extra_args: Optional[list[str]] = None ): ''' Creates a new project with the specified template @@ -446,7 +421,7 @@ def new(template: str, working_directory), bin_dir) - @tracer.start_as_current_span("csharpproject_publish") # type: ignore + @tracer.start_as_current_span("csharpproject_publish") def publish(self, configuration: str, output_dir: str, @@ -454,7 +429,7 @@ def publish(self, packages_path: str, target_framework_moniker: Optional[str] = None, runtime_identifier: Optional[str] = None, - msbuildprops: Optional[List[str]] = None, + msbuildprops: Optional[list[str]] = None, *args: str ) -> None: ''' @@ -485,7 +460,7 @@ def publish(self, self.working_directory ) - def __get_output_build_arg(self, outdir: str) -> List[str]: + def __get_output_build_arg(self, outdir: str) -> list[str]: # dotnet build/publish does not support `--output` with sln files if path.splitext(self.csproj_file)[1] == '.sln': outdir = outdir if path.isabs(outdir) else path.abspath(outdir) @@ -506,11 +481,11 @@ def __print_complus_environment() -> None: getLogger().info(' "%s=%s"', env, environ[env]) getLogger().info('-' * 50) - @tracer.start_as_current_span("csharpproject_run") # type: ignore + @tracer.start_as_current_span("csharpproject_run") def run(self, configuration: str, target_framework_moniker: str, - success_exit_codes: List[int], + success_exit_codes: list[int], verbose: bool, *args: str) -> int: ''' @@ -532,7 +507,7 @@ def run(self, FrameworkVersion = NamedTuple('FrameworkVersion', major=int, minor=int) -@tracer.start_as_current_span("dotnet_get_framework_version") # type: ignore +@tracer.start_as_current_span("dotnet_get_framework_version") def get_framework_version(framework: str) -> FrameworkVersion: groups = search(r".*?(\d+)\.(\d+)$", framework) if not groups: @@ -542,7 +517,7 @@ def get_framework_version(framework: str) -> FrameworkVersion: return version -@tracer.start_as_current_span("dotnet_get_base_path") # type: ignore +@tracer.start_as_current_span("dotnet_get_base_path") def get_base_path(dotnet_path: Optional[str] = None) -> str: """Gets the dotnet Host version from the `dotnet --info` command.""" if not dotnet_path: @@ -578,7 +553,7 @@ def get_dotnet_path() -> str: dotnet_path = path.abspath(path.join(base_path, '..', '..')) return dotnet_path -@tracer.start_as_current_span("dotnet_get_dotnet_version_from_path") # type: ignore +@tracer.start_as_current_span("dotnet_get_dotnet_version_from_path") def get_dotnet_version_from_path( framework: str, dotnet_path: Optional[str] = None, @@ -614,7 +589,7 @@ def get_dotnet_version_from_path( return sdk -@tracer.start_as_current_span("dotnet_get_dotnet_version_precise") # type: ignore +@tracer.start_as_current_span("dotnet_get_dotnet_version_precise") def get_dotnet_version_precise( framework: str, dotnet_path: Optional[str] = None, @@ -627,7 +602,7 @@ def get_dotnet_version_precise( with open(path.join(sdk_path, sdk, '.version')) as sdk_version_file: return sdk_version_file.readlines()[3].strip() -@tracer.start_as_current_span("dotnet_get_dotnet_sdk") # type: ignore 
+@tracer.start_as_current_span("dotnet_get_dotnet_sdk") def get_dotnet_sdk( framework: str, dotnet_path: Optional[str] = None, @@ -639,8 +614,8 @@ def get_dotnet_sdk( with open(path.join(sdk_path, sdk, '.version')) as sdk_version_file: return sdk_version_file.readline().strip() -@tracer.start_as_current_span("dotnet_get_repository") # type: ignore -def get_repository(repository: str) -> Tuple[str, str]: +@tracer.start_as_current_span("dotnet_get_repository") +def get_repository(repository: str) -> tuple[str, str]: url_path = urlparse(repository).path tokens = url_path.split("/") if len(tokens) != 3: @@ -650,7 +625,7 @@ def get_repository(repository: str) -> Tuple[str, str]: return owner, repo -@tracer.start_as_current_span("dotnet_get_commit_date") # type: ignore +@tracer.start_as_current_span("dotnet_get_commit_date") def get_commit_date( framework: str, commit_sha: str, @@ -670,7 +645,7 @@ def get_commit_date( if repository is None: core_sdk_frameworks = ChannelMap.get_supported_frameworks() - urls = [] + urls: list[str] = [] if framework in core_sdk_frameworks: # Try dotnet/dotnet first, then dotnet/sdk, then dotnet/core-sdk @@ -775,7 +750,7 @@ def __get_directory(architecture: str) -> str: '''Gets the default directory where dotnet is to be installed.''' return path.join(get_tools_directory(), 'dotnet', architecture) -@tracer.start_as_current_span("dotnet_remove_dotnet") # type: ignore +@tracer.start_as_current_span("dotnet_remove_dotnet") def remove_dotnet(architecture: str) -> None: ''' Removes the dotnet installed in the tools directory associated with the @@ -785,7 +760,7 @@ def remove_dotnet(architecture: str) -> None: if path.isdir(dotnet_path): rmtree(dotnet_path) -@tracer.start_as_current_span("dotnet_shutdown_server") # type: ignore +@tracer.start_as_current_span("dotnet_shutdown_server") def shutdown_server(verbose:bool) -> None: ''' Shuts down the dotnet server @@ -804,11 +779,11 @@ def shutdown_server(verbose:bool) -> None: else: system('killall -9 dotnet 2> /dev/null || killall -9 VSTest.Console 2> /dev/null || killall -9 msbuild 2> /dev/null') -@tracer.start_as_current_span("dotnet_install") # type: ignore +@tracer.start_as_current_span("dotnet_install") def install( architecture: str, - channels: List[str], - versions: List[str], + channels: list[str], + versions: list[str], verbose: bool, install_dir: Optional[str] = None, azure_feed_url: Optional[str] = None, @@ -837,7 +812,7 @@ def install( max_count = 10 while count < max_count: try: - with urlopen(dotnetInstallScriptUrl, context=ssl._create_unverified_context()) as response: + with urlopen(dotnetInstallScriptUrl) as response: if "html" in response.info()['Content-Type']: count = count + 1 sleep(count ** 2) @@ -908,7 +883,7 @@ def install( setup_dotnet(install_dir) -@tracer.start_as_current_span(name="dotnet_setup_dotnet") # type: ignore +@tracer.start_as_current_span(name="dotnet_setup_dotnet") def setup_dotnet(dotnet_path: str): # Set DotNet Cli environment variables. environ['DOTNET_CLI_TELEMETRY_OPTOUT'] = '1' @@ -929,9 +904,6 @@ def __add_arguments(parser: ArgumentParser) -> ArgumentParser: Adds new arguments to the specified ArgumentParser object. 
''' - if not isinstance(parser, ArgumentParser): - raise TypeError('Invalid parser.') - SUPPORTED_ARCHITECTURES = [ 'x64', 'x86', @@ -953,7 +925,7 @@ def __add_arguments(parser: ArgumentParser) -> ArgumentParser: required=False, nargs='+', default=[], - action=VersionsAction, + type=version_type, help='Version of the dotnet cli to install in the A.B.C format' ) @@ -968,7 +940,7 @@ def add_arguments(parser: ArgumentParser) -> ArgumentParser: return parser -def __process_arguments(args: List[str]) -> Any: +def __process_arguments(args: list[str]) -> Any: parser = ArgumentParser( description='DotNet Cli wrapper.', allow_abbrev=False @@ -1018,7 +990,7 @@ def __process_arguments(args: List[str]) -> Any: return parser.parse_args(args) -def __main(argv: List[str]) -> None: +def __main(argv: list[str]) -> None: validate_supported_runtime() args = __process_arguments(argv) setup_loggers(verbose=args.verbose) diff --git a/scripts/micro_benchmarks.py b/scripts/micro_benchmarks.py index 086b426706a..40a4b821e55 100755 --- a/scripts/micro_benchmarks.py +++ b/scripts/micro_benchmarks.py @@ -12,7 +12,7 @@ from os import path from subprocess import CalledProcessError from traceback import format_exc -from typing import Any, List +from typing import Any import csv import sys @@ -31,8 +31,8 @@ setup_tracing() tracer = get_tracer() -@tracer.start_as_current_span(name="micro_benchmarks_get_supported_configurations") # type: ignore -def get_supported_configurations() -> List[str]: +@tracer.start_as_current_span(name="micro_benchmarks_get_supported_configurations") +def get_supported_configurations() -> list[str]: ''' The configuration to use for building the project. The default for most projects is 'Release' @@ -124,7 +124,7 @@ def __valid_file_path(file_path: str) -> str: help='Full path to dotnet.exe', ) - def __get_bdn_arguments(user_input: str) -> List[str]: + def __get_bdn_arguments(user_input: str) -> list[str]: file = StringIO(user_input) reader = csv.reader(file, delimiter=' ') for args in reader: @@ -167,13 +167,6 @@ def __get_bdn_arguments(user_input: str) -> List[str]: help='Move the binaries to a different directory for running', ) - def __valid_dir_path(file_path: str) -> str: - '''Verifies that specified file path exists.''' - file_path = path.abspath(file_path) - if not path.isdir(file_path): - raise ArgumentTypeError('{} does not exist.'.format(file_path)) - return file_path - def __csproj_file_path(file_path: str) -> dotnet.CSharpProjFile: file_path = __valid_file_path(file_path) return dotnet.CSharpProjFile( @@ -209,7 +202,7 @@ def __absolute_path(file_path: str) -> str: '--bin-directory', dest='bin_directory', required=False, - default=path.join(get_repo_root_path(), 'artifacts', 'bin'), + default=path.join(get_artifacts_directory(), 'bin'), type=__absolute_path, help='Root of the bin directory', ) @@ -217,7 +210,7 @@ def __absolute_path(file_path: str) -> str: return parser -def __process_arguments(args: List[str]): +def __process_arguments(args: list[str]): parser = ArgumentParser( description="Builds the benchmarks.", allow_abbrev=False) @@ -234,8 +227,8 @@ def __process_arguments(args: List[str]): return parser.parse_args(args) -def __get_benchmarkdotnet_arguments(framework: str, args: Any) -> List[str]: - run_args: List[str] = [] +def __get_benchmarkdotnet_arguments(framework: str, args: Any) -> list[str]: + run_args: list[str] = [] if args.corerun: run_args += ['--coreRun'] + args.corerun if args.cli: @@ -285,7 +278,7 @@ def __get_benchmarkdotnet_arguments(framework: str, args: Any) 
-> List[str]: return run_args -@tracer.start_as_current_span(name="micro_benchmarks_get_bin_dir_to_use") # type: ignore +@tracer.start_as_current_span(name="micro_benchmarks_get_bin_dir_to_use") def get_bin_dir_to_use(csprojfile: dotnet.CSharpProjFile, bin_directory: str, run_isolated: bool) -> str: ''' Gets the bin_directory, which might be different if run_isolate=True @@ -295,11 +288,11 @@ def get_bin_dir_to_use(csprojfile: dotnet.CSharpProjFile, bin_directory: str, ru else: return bin_directory -@tracer.start_as_current_span(name="micro_benchmarks_build") # type: ignore +@tracer.start_as_current_span(name="micro_benchmarks_build") def build( BENCHMARKS_CSPROJ: dotnet.CSharpProject, configuration: str, - target_framework_monikers: List[str], + target_framework_monikers: list[str], incremental: str, run_isolated: bool, for_wasm: bool, @@ -321,7 +314,7 @@ def build( __log_script_header("Restoring .NET micro benchmarks") BENCHMARKS_CSPROJ.restore(packages_path=packages, verbose=verbose) - build_args: List[str] = [] + build_args: list[str] = [] if for_wasm: build_args += ['/p:BuildingForWasm=true'] @@ -344,7 +337,7 @@ def build( objDir = path.join(get_artifacts_directory(), 'obj', BENCHMARKS_CSPROJ.project_name) remove_directory(objDir) -@tracer.start_as_current_span(name="micro_benchmarks_run") # type: ignore +@tracer.start_as_current_span(name="micro_benchmarks_run") def run( BENCHMARKS_CSPROJ: dotnet.CSharpProject, configuration: str, @@ -359,7 +352,7 @@ def run( # dotnet exec run_args = __get_benchmarkdotnet_arguments(framework, args) - target_framework_moniker = dotnet.FrameworkAction.get_target_framework_moniker( + target_framework_moniker = dotnet.get_target_framework_moniker( framework ) @@ -388,8 +381,8 @@ def __log_script_header(message: str): getLogger().info(message) getLogger().info('-' * len(message)) -@tracer.start_as_current_span("micro_benchmarks_main") # type: ignore -def __main(argv: List[str]) -> int: +@tracer.start_as_current_span("micro_benchmarks_main") +def __main(argv: list[str]) -> int: try: validate_supported_runtime() args = __process_arguments(argv) @@ -398,8 +391,7 @@ def __main(argv: List[str]) -> int: frameworks = args.frameworks incremental = args.incremental verbose = args.verbose - target_framework_monikers = dotnet.FrameworkAction. 
\ - get_target_framework_monikers(frameworks) + target_framework_monikers = dotnet.get_target_framework_monikers(frameworks) setup_loggers(verbose=verbose) diff --git a/scripts/performance/common.py b/scripts/performance/common.py index 80ace0c09fc..0c27dde045c 100644 --- a/scripts/performance/common.py +++ b/scripts/performance/common.py @@ -18,7 +18,7 @@ import sys import time import base64 -from typing import Callable, List, Optional, Tuple, Type, TypeVar +from typing import Any, Callable, Optional, TypeVar def get_machine_architecture(): @@ -46,14 +46,14 @@ def extension(): return '.exe' if iswin() else '' def __is_supported_version() -> bool: - '''Checks if the script is running on the supported version (>=3.5).''' - return sys.version_info >= (3, 5) + '''Checks if the script is running on the supported version (>=3.9).''' + return sys.version_info >= (3, 9) def validate_supported_runtime(): '''Raises a RuntimeError exception when the runtime is not supported.''' if not __is_supported_version(): - raise RuntimeError('Python 3.5 or newer is required.') + raise RuntimeError('Python 3.9 or newer is required.') def get_python_executable() -> str: @@ -76,11 +76,9 @@ def remove_directory(path: str) -> None: '''Recursively deletes a directory tree.''' if not path: raise TypeError('Undefined path.') - if not isinstance(path, str): - raise TypeError('Invalid type.') if os.path.isdir(path): - def handle_rmtree_errors(func: Callable[[str], None], path: str, excinfo: Exception): + def handle_rmtree_errors(func: Callable[[str], None], path: str, excinfo: Any): """ Helper function to handle long path errors on Windows. """ @@ -168,11 +166,11 @@ def push_dir(path: Optional[str] = None): TRet = TypeVar('TRet') def retry_on_exception( function: Callable[[], TRet], - retry_count = 3, - retry_delay = 5, - retry_delay_multiplier = 1, - retry_exceptions: List[Type[Exception]]=[Exception], - raise_exceptions: List[Type[Exception]]=[]) -> Optional[TRet]: + retry_count: int = 3, + retry_delay: float = 5, + retry_delay_multiplier: float = 1, + retry_exceptions: list[type[Exception]]=[Exception], + raise_exceptions: list[type[Exception]]=[]) -> Optional[TRet]: ''' Retries the specified function if it throws an exception. @@ -209,12 +207,12 @@ def retry_on_exception( time.sleep(retry_delay) retry_delay *= retry_delay_multiplier -def get_certificates() -> List[str]: +def get_certificates() -> list[str]: ''' Gets the certificates from the certhelper tool and on Mac uses find-certificate. 
''' if ismac(): - certs: List[str] = [] + certs: list[str] = [] with open("/Users/helix-runner/certs/LabCert1.pfx", "rb") as f: certs.append(base64.b64encode(f.read()).decode()) with open("/Users/helix-runner/certs/LabCert2.pfx", "rb") as f: @@ -224,8 +222,7 @@ def get_certificates() -> List[str]: cmd_line = [(os.path.join(str(helixpayload()), 'certhelper', "CertHelper%s" % extension()))] cert_helper = RunCommand(cmd_line, None, True, False, 0) try: - cert_helper.run() - return cert_helper.stdout.splitlines() + return cert_helper.run_and_get_stdout().splitlines() except Exception as ex: getLogger().error("Failed to get certificates") getLogger().error('{0}: {1}'.format(type(ex), str(ex))) @@ -253,6 +250,28 @@ def set_environment_variable(name: str, value: str, save_to_pipeline: bool = Tru __write_pipeline_variable(name, value) os.environ[name] = value +def run_msbuild_command(args: list[str], verbose: bool=True, warn_as_error: bool=True, perf_repo_dir: Optional[str] = None) -> str: + if perf_repo_dir is None: + perf_repo_dir = get_repo_root_path() + msbuild_dir = os.path.join(perf_repo_dir, 'eng', 'common') + + if iswin(): + cmdline = ["powershell.exe", os.path.join(msbuild_dir, "msbuild.ps1")] + if not warn_as_error: + cmdline += ["-warnaserror", "0"] + else: + msbuild_sh_path = os.path.join(msbuild_dir, "msbuild.sh") + RunCommand(["chmod", "+x", msbuild_sh_path]).run() + cmdline = [msbuild_sh_path] + if not warn_as_error: + cmdline += ["--warnaserror", "false"] + + cmdline += args + return RunCommand(cmdline, verbose=verbose).run_and_get_stdout().strip() + +def get_msbuild_property(props_path: str, property_name: str) -> str: + return run_msbuild_command([props_path, f"/getProperty:{property_name}"]) + class RunCommand: ''' This is a class wrapper around `subprocess.Popen` with an additional set @@ -261,13 +280,11 @@ class RunCommand: def __init__( self, - cmdline: List[str], - success_exit_codes: Optional[List[int]] = None, + cmdline: list[str], + success_exit_codes: Optional[list[int]] = None, verbose: bool = False, echo: bool = True, retry: int = 0): - if cmdline is None: - raise TypeError('Unspecified command line to be executed.') if not cmdline: raise ValueError('Specified command line is empty.') @@ -282,12 +299,12 @@ def __init__( self.__success_exit_codes = success_exit_codes @property - def cmdline(self) -> List[str]: + def cmdline(self) -> list[str]: '''Command-line to use when starting the application.''' return self.__cmdline @property - def success_exit_codes(self) -> List[int]: + def success_exit_codes(self) -> list[int]: ''' The successful exit codes that the associated process specifies when it terminated. 
@@ -308,7 +325,7 @@ def verbose(self) -> bool: def stdout(self) -> str: return self.__stdout.getvalue() - def __runinternal(self, working_directory: Optional[str] = None) -> Tuple[int, str]: + def __runinternal(self, working_directory: Optional[str] = None) -> tuple[int, str]: should_pipe = self.verbose with push_dir(working_directory): quoted_cmdline = '$ ' @@ -356,3 +373,20 @@ def run(self, working_directory: Optional[str] = None) -> int: returncode, quoted_cmdline) return returncode + + def run_and_get_stdout(self, working_directory: Optional[str] = None) -> str: + '''Executes specified shell command and returns its stdout.''' + prev_verbose, prev_echo = self.__verbose, self.__echo + + # stdout only is logged if verbose is enabled, so we temporarily enable it but with echo disabled + if not self.__verbose: + self.__verbose = True + self.__echo = False + + try: + self.run(working_directory) + finally: + self.__verbose = prev_verbose + self.__echo = prev_echo + + return self.stdout \ No newline at end of file diff --git a/scripts/performance/logger.py b/scripts/performance/logger.py index dcb303c1c50..b5f03c44823 100644 --- a/scripts/performance/logger.py +++ b/scripts/performance/logger.py @@ -17,23 +17,12 @@ class LoggerStateManager: def __init__(self): self.logger_initialized = False - self.logger_opentelemetry_imported = False def set_initialized(self, value: bool): self.logger_initialized = value - def set_opentelemetry_imported(self, value: bool): self.logger_opentelemetry_imported = value def get_initialized(self) -> bool: return self.logger_initialized - def get_opentelemetry_imported(self) -> bool: return self.logger_opentelemetry_imported logger_state_manager = LoggerStateManager() -try: - from opentelemetry._logs import set_logger_provider - from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler - from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, ConsoleLogExporter - logger_state_manager.set_opentelemetry_imported(True) -except ImportError: - pass - def setup_loggers(verbose: bool, enable_open_telemetry_logger: bool = False): '''Setup the root logger for the performance scripts.''' def __formatter() -> Formatter: @@ -51,7 +40,11 @@ def __initialize(verbose: bool): getLogger().addHandler(__get_console_handler(verbose)) if enable_open_telemetry_logger: - if logger_state_manager.get_opentelemetry_imported(): + try: + from opentelemetry._logs import set_logger_provider # pyright: ignore[reportMissingTypeStubs] + from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler + from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, ConsoleLogExporter + logger_provider = LoggerProvider() set_logger_provider(logger_provider) logger_provider.add_log_record_processor(BatchLogRecordProcessor(ConsoleLogExporter())) @@ -59,7 +52,7 @@ def __initialize(verbose: bool): # Attach OTel handler to logger getLogger().addHandler(handler) - else: + except ImportError: getLogger().warning('OpenTelemetry not imported. 
Skipping OpenTelemetry logger initialization.') # Log file handler diff --git a/scripts/performance/tracer.py b/scripts/performance/tracer.py index 064b20d0560..0628f8ce328 100644 --- a/scripts/performance/tracer.py +++ b/scripts/performance/tracer.py @@ -1,56 +1,57 @@ +import functools from logging import getLogger +from typing import TYPE_CHECKING, Callable, Optional, TypeVar +from typing_extensions import ParamSpec class TracingStateManager: '''A class to manage the state of tracing.''' def __init__(self): self.trace_provider_initialized = False self.trace_provider_console_exporter_enabled = False - self.trace_opentelemetry_imported = False def set_initialized(self, value : bool): self.trace_provider_initialized = value def set_console_exporter_enabled(self, value : bool): self.trace_provider_console_exporter_enabled = value - def set_opentelemetry_imported(self, value : bool): self.trace_opentelemetry_imported = value def get_initialized(self): return self.trace_provider_initialized def get_console_exporter_enabled(self): return self.trace_provider_console_exporter_enabled - def get_opentelemetry_imported(self): return self.trace_opentelemetry_imported tracing_state_manager = TracingStateManager() -try: - from opentelemetry import trace - from opentelemetry.sdk.trace import TracerProvider - from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter - tracing_state_manager.set_opentelemetry_imported(True) - -except ImportError: - pass def setup_tracing(): '''Set up the OpenTelemetry trace provider.''' - if tracing_state_manager.get_initialized() or not tracing_state_manager.get_opentelemetry_imported(): + if tracing_state_manager.get_initialized(): + return + + try: + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + except: return + provider = TracerProvider() trace.set_tracer_provider(provider) tracing_state_manager.set_initialized(True) def enable_trace_console_exporter(): '''Enable the console exporter for trace spans.''' - if tracing_state_manager.get_console_exporter_enabled() or not tracing_state_manager.get_opentelemetry_imported(): - if not tracing_state_manager.get_opentelemetry_imported(): - getLogger().warning('OpenTelemetry not imported. Skipping OpenTelemetry console logger initialization.') + if tracing_state_manager.get_console_exporter_enabled(): + return + + try: + from opentelemetry import trace + from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter + except: + getLogger().warning('OpenTelemetry not imported. 
Skipping OpenTelemetry console logger initialization.') return + provider = trace.get_tracer_provider() processor = BatchSpanProcessor(ConsoleSpanExporter()) - provider.add_span_processor(processor) + provider.add_span_processor(processor) # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue] -- exists but not in type stubs tracing_state_manager.set_console_exporter_enabled(True) -def get_tracer(name="dotnet.performance"): +def get_tracer(name: str="dotnet.performance"): '''Return a tracer with the specified name.''' return AwareTracer(name) -def is_opentelemetry_imported() -> bool: - '''Return whether OpenTelemetry has been imported.''' - return tracing_state_manager.get_opentelemetry_imported() - def is_provider_initialized() -> bool: '''Return whether the trace provider has been initialized.''' return tracing_state_manager.get_initialized() @@ -59,17 +60,26 @@ def is_console_exporter_enabled() -> bool: '''Return whether the console exporter has been enabled.''' return tracing_state_manager.get_console_exporter_enabled() +P = ParamSpec("P") +R = TypeVar("R") class AwareTracer: """ A OpenTelemetry aware tracer implementation that is used as a wrapper for OpenTelemetry calls where OpenTelemetry is not guaranteed be installed. When not installed, the tracer is a no-op and the decorated functions are executed as if the decorator was not there.. """ - tracer = None - def __init__(self, name="dotnet.performance"): - if is_opentelemetry_imported(): - self.tracer = trace.get_tracer(name) + def __init__(self, name: str = "dotnet.performance") -> None: + if TYPE_CHECKING: + from opentelemetry.trace import Tracer # pyright: ignore[reportMissingTypeStubs] + self._tracer: Optional[Tracer] + + try: + from opentelemetry import trace + except ImportError: + self._tracer = None + else: + self._tracer = trace.get_tracer(name) - def start_as_current_span(self, *top_args, **top_kwargs): + def start_as_current_span(self, name: str) -> Callable[[Callable[P, R]], Callable[P, R]]: """ Decorator that starts a new span as the current span if OpenTelemetry is imported. If OpenTelemetry is not imported, the function is executed without starting a new span. @@ -81,13 +91,20 @@ def start_as_current_span(self, *top_args, **top_kwargs): Returns: The result of executing the decorated function. """ - def decorator(func): - def wrapper(*args, **kwargs): - if self.tracer is not None: - with self.tracer.start_as_current_span(*top_args, **top_kwargs): - return func(*args, **kwargs) - else: + def decorator(func: Callable[P, R]) -> Callable[P, R]: + # If no tracer, just return the original function unchanged (best for typing & introspection). 
+ if self._tracer is None: + return func + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> R: + if self._tracer is None: return func(*args, **kwargs) - return wrapper + + with self._tracer.start_as_current_span(name): + return func(*args, **kwargs) + + return wrapped + return decorator \ No newline at end of file diff --git a/scripts/run_performance_job.py b/scripts/run_performance_job.py index 64d18b14c2f..07bbde45d7e 100644 --- a/scripts/run_performance_job.py +++ b/scripts/run_performance_job.py @@ -1,5 +1,4 @@ from logging import getLogger -import re from dataclasses import dataclass, field from datetime import timedelta from glob import glob @@ -12,15 +11,15 @@ from traceback import format_exc import urllib.request import xml.etree.ElementTree as ET -from typing import Any, Dict, List, Optional +from typing import Any, Optional from build_runtime_payload import * import ci_setup -from performance.common import RunCommand, set_environment_variable +from performance.common import RunCommand, get_msbuild_property, set_environment_variable from performance.logger import setup_loggers from send_to_helix import PerfSendToHelixArgs, perf_send_to_helix -def output_counters_for_crank(reports: List[Any]): +def output_counters_for_crank(reports: list[Any]): print("#StartJobStatistics") statistics: dict[str, list[Any]] = { @@ -92,7 +91,7 @@ class RunPerformanceJobArgs: linking_type: str = "dynamic" runtime_type: str = "coreclr" affinity: Optional[str] = "0" - run_env_vars: Dict[str, str] = field(default_factory=dict) # type: ignore + run_env_vars: dict[str, str] = field(default_factory=dict[str, str]) is_scenario: bool = False runtime_flavor: Optional[str] = None local_build: bool = False @@ -264,12 +263,15 @@ def get_pre_commands( return helix_pre_commands -def get_post_commands(os_group: str, runtime_type: str): +def get_post_commands(os_group: str, internal: bool, runtime_type: str): if os_group == "windows": helix_post_commands = ["set PYTHONPATH=%ORIGPYPATH%"] else: helix_post_commands = ["export PYTHONPATH=$ORIGPYPATH"] + if internal: + helix_post_commands += ["deactivate"] # deactivate venv + if runtime_type == "wasm" and os_group != "windows": helix_post_commands += [ """test -d "$HELIX_WORKITEM_UPLOAD_ROOT" && ( @@ -670,8 +672,7 @@ def run_performance_job(args: RunPerformanceJobArgs): if args.use_local_commit_time: get_commit_time_command = RunCommand(["git", "show", "-s", "--format=%ci", args.perf_repo_hash], verbose=True) - get_commit_time_command.run(args.runtime_repo_dir) - ci_setup_arguments.commit_time = f"{get_commit_time_command.stdout.strip()}" + ci_setup_arguments.commit_time = get_commit_time_command.run_and_get_stdout(args.runtime_repo_dir).strip() # not_in_lab should stay False for internal dotnet performance CI runs if not args.internal and not args.performance_repo_ci: @@ -687,13 +688,8 @@ def run_performance_job(args: RunPerformanceJobArgs): raise Exception("Please provide either the product version, a path to Versions.props, or a runtime repo directory") args.versions_props_path = os.path.join(args.runtime_repo_dir, "eng", "Versions.props") - with open(args.versions_props_path) as f: - for line in f: - match = re.search(r"ProductVersion>([^<]*)<", line) - if match: - product_version = match.group(1) - break - if product_version is None: + product_version = get_msbuild_property(args.versions_props_path, "ProductVersion") + if not product_version: raise Exception("Unable to find ProductVersion in Versions.props") mono_dotnet_path = 
os.path.join(payload_dir, "dotnet-mono") @@ -726,15 +722,9 @@ def run_performance_job(args: RunPerformanceJobArgs): raise Exception("BrowserVersions.props must be present for wasm runs") args.browser_versions_props_path = os.path.join(args.runtime_repo_dir, "eng", "testing", "BrowserVersions.props") - with open(args.browser_versions_props_path) as f: - for line in f: - match = re.search(r"linux_V8Version>([^<]*)<", line) - if match: - v8_version = match.group(1) - v8_version = ".".join(v8_version.split(".")[:3]) - break - else: - raise Exception("Unable to find v8 version in BrowserVersions.props") + v8_version = get_msbuild_property(args.browser_versions_props_path, "linux_V8Version") + if not v8_version: + raise Exception("Unable to find v8 version in BrowserVersions.props") if args.javascript_engine_path is None: args.javascript_engine_path = f"/home/helixbot/.jsvu/bin/v8-{v8_version}" @@ -879,7 +869,7 @@ def run_performance_job(args: RunPerformanceJobArgs): agent_python = "python3" helix_pre_commands = get_pre_commands(args.os_group, args.internal, args.runtime_type, args.codegen_type, v8_version) - helix_post_commands = get_post_commands(args.os_group, args.runtime_type) + helix_post_commands = get_post_commands(args.os_group, args.internal, args.runtime_type) ci_setup_arguments.local_build = args.local_build @@ -931,7 +921,7 @@ def run_performance_job(args: RunPerformanceJobArgs): dotnet_executable_path = os.path.join(ci_setup_arguments.dotnet_path or ci_setup_arguments.install_dir, "dotnet") ci_artifacts_log_dir = os.path.join(args.performance_repo_dir, 'artifacts', 'log', build_config) - def publish_dotnet_app_to_payload(payload_dir_name, csproj_path, self_contained=True): + def publish_dotnet_app_to_payload(payload_dir_name: str, csproj_path: str, self_contained: bool = True): RunCommand([ dotnet_executable_path, "publish", "-c", "Release", @@ -1178,7 +1168,7 @@ def get_work_item_command_for_artifact_dir(artifact_dir: str): -def main(argv: List[str]): +def main(argv: list[str]): setup_loggers(verbose=True) try: diff --git a/scripts/send_to_helix.py b/scripts/send_to_helix.py index 45b1f0a2068..f3266623e3a 100644 --- a/scripts/send_to_helix.py +++ b/scripts/send_to_helix.py @@ -1,8 +1,8 @@ -from typing import List, Optional, Union +from typing import Optional, Union from dataclasses import dataclass, field from datetime import timedelta import os -from performance.common import RunCommand, iswin, set_environment_variable +from performance.common import run_msbuild_command, set_environment_variable @dataclass class PerfSendToHelixArgs: @@ -24,7 +24,7 @@ class PerfSendToHelixArgs: helix_type: str = "tests/default/" build_config: str = "" helix_build: str = os.environ.get("BUILD_BUILDNUMBER", "") - helix_target_queues: List[str] = field(default_factory=list) # type: ignore + helix_target_queues: list[str] = field(default_factory=list[str]) # Environment variables that need to be set env_build_reason: str = os.environ.get("BUILD_REASON", "pr") @@ -35,8 +35,8 @@ class PerfSendToHelixArgs: # Optional for Helix SDK helix_access_token: Optional[str] = None - helix_pre_commands: List[str] = field(default_factory=list) # type: ignore - helix_post_commands: List[str] = field(default_factory=list) # type: ignore + helix_pre_commands: list[str] = field(default_factory=list[str]) + helix_post_commands: list[str] = field(default_factory=list[str]) include_dotnet_cli: bool = False dotnet_cli_package_type: str = "" dotnet_cli_version: str = "" @@ -56,13 +56,13 @@ class PerfSendToHelixArgs: 
targets_windows: bool = True # Used by BDN projects - work_item_command: Optional[List[str]] = None - baseline_work_item_command: Optional[List[str]] = None + work_item_command: Optional[list[str]] = None + baseline_work_item_command: Optional[list[str]] = None partition_count: Optional[int] = None - bdn_arguments: Optional[List[str]] = None - baseline_bdn_arguments: Optional[List[str]] = None + bdn_arguments: Optional[list[str]] = None + baseline_bdn_arguments: Optional[list[str]] = None compare: bool = False - compare_command: Optional[List[str]] = None + compare_command: Optional[list[str]] = None only_sanity_check: bool = False # Used by scenarios projects @@ -73,10 +73,10 @@ class PerfSendToHelixArgs: affinity: Optional[str] = None ios_strip_symbols: Optional[bool] = None ios_llvm_build: Optional[bool] = None - scenario_arguments: Optional[List[str]] = None + scenario_arguments: Optional[list[str]] = None def set_environment_variables(self, save_to_pipeline: bool = True): - def set_env_var(name: str, value: Union[str, bool, List[str], timedelta, int, None], sep = " ", save_to_pipeline=save_to_pipeline): + def set_env_var(name: str, value: Union[str, bool, list[str], timedelta, int, None], sep: str = " ", save_to_pipeline: bool=save_to_pipeline): if value is None: # None means don't set it return @@ -136,22 +136,11 @@ def set_env_var(name: str, value: Union[str, bool, List[str], timedelta, int, No set_env_var("SYSTEM_TEAMPROJECT", self.env_system_team_project, save_to_pipeline=False) set_env_var("SYSTEM_ACCESSTOKEN", self.env_system_access_token, save_to_pipeline=False) -def run_shell(script: str, args: List[str]): - RunCommand(["chmod", "+x", script]).run() - RunCommand([script, *args], verbose=True).run() - -def run_powershell(script: str, args: List[str]): - RunCommand(["powershell.exe", script, *args], verbose=True).run() - def perf_send_to_helix(args: PerfSendToHelixArgs): args.set_environment_variables(save_to_pipeline=False) binlog_dest = os.path.join(args.performance_repo_dir, "artifacts", "log", args.build_config, "SendToHelix.binlog") send_params = [args.project_file, "/restore", "/t:Test", f"/bl:{binlog_dest}"] - common_dir = os.path.join(args.performance_repo_dir, "eng", "common") - if iswin(): - run_powershell(os.path.join(common_dir, "msbuild.ps1"), ["-warnaserror", "0", *send_params]) - else: - run_shell(os.path.join(common_dir, "msbuild.sh"), ["--warnaserror", "false", *send_params]) + run_msbuild_command(send_params, warn_as_error=False) diff --git a/scripts/upload.py b/scripts/upload.py index 49f5c809cc4..9d5bbc12b0e 100644 --- a/scripts/upload.py +++ b/scripts/upload.py @@ -1,4 +1,5 @@ from random import randint +from typing import Optional import uuid from azure.storage.blob import BlobClient, ContentSettings from azure.storage.queue import QueueClient, TextBase64EncodePolicy @@ -6,7 +7,7 @@ from azure.identity import DefaultAzureCredential, ClientAssertionCredential, CertificateCredential from traceback import format_exc from glob import glob -from performance.common import retry_on_exception, RunCommand, helixpayload, base64_to_bytes, extension, get_certificates +from performance.common import retry_on_exception, base64_to_bytes, get_certificates from performance.constants import TENANT_ID, ARC_CLIENT_ID, CERT_CLIENT_ID import os import json @@ -27,31 +28,29 @@ def get_unique_name(filename: str, unique_id: str) -> str: newname = "{0}-perf-lab-report.json".format(randint(1000, 9999)) return newname -def upload(globpath: str, container: str, queue: str, 
storage_account_uri: str): +def get_credential(): try: - credential = None - try: - dac = DefaultAzureCredential() - credential = ClientAssertionCredential(TENANT_ID, ARC_CLIENT_ID, lambda: dac.get_token("api://AzureADTokenExchange/.default").token) - credential.get_token("https://storage.azure.com/.default") - except ClientAuthenticationError as ex: - credential = None - getLogger().info("Unable to use managed identity. Falling back to certificate.") + dac = DefaultAzureCredential() + credential = ClientAssertionCredential(TENANT_ID, ARC_CLIENT_ID, lambda: dac.get_token("api://AzureADTokenExchange/.default").token) + credential.get_token("https://storage.azure.com/.default") + return credential + except ClientAuthenticationError as ex: + getLogger().info("Unable to use managed identity. Falling back to certificate.") + certs = get_certificates() + for cert in certs: + credential = CertificateCredential(TENANT_ID, CERT_CLIENT_ID, certificate_data=base64_to_bytes(cert), send_certificate_chain=True) try: - certs = get_certificates() - for cert in certs: - credential = CertificateCredential(TENANT_ID, CERT_CLIENT_ID, certificate_data=base64_to_bytes(cert), send_certificate_chain=True) - try: - credential.get_token("https://storage.azure.com/.default") - except ClientAuthenticationError as ex: - getLogger().error(ex.message) - credential = None - continue - except Exception as ex: - credential = None - if credential is None: - raise RuntimeError("Authentication failed with managed identity and certificates. No valid authentication method available.") + credential.get_token("https://storage.azure.com/.default") + return credential + except ClientAuthenticationError as ex: + getLogger().error(ex.message) + continue + + raise RuntimeError("Authentication failed with managed identity and certificates. 
No valid authentication method available.") +def upload(globpath: str, container: str, queue: Optional[str], storage_account_uri: str): + try: + credential = get_credential() files = glob(globpath, recursive=True) any_upload_or_queue_failed = False for infile in files: @@ -64,7 +63,13 @@ def upload(globpath: str, container: str, queue: str, storage_account_uri: str): upload_succeded = False with open(infile, "rb") as data: try: - retry_on_exception(lambda: blob_client.upload_blob(data, blob_type="BlockBlob", content_settings=ContentSettings(content_type="application/json")), raise_exceptions=[ResourceExistsError]) + def _upload(): + blob_client.upload_blob( # pyright: ignore[reportUnknownMemberType] -- type stub contains Unknown kwargs + data, + blob_type="BlockBlob", + content_settings=ContentSettings(content_type="application/json")) + + retry_on_exception(_upload, raise_exceptions=[ResourceExistsError]) upload_succeded = True except Exception as ex: any_upload_or_queue_failed = True diff --git a/src/scenarios/shared/codefixes.py b/src/scenarios/shared/codefixes.py index c8a3c6de64e..3fd447c8571 100644 --- a/src/scenarios/shared/codefixes.py +++ b/src/scenarios/shared/codefixes.py @@ -7,16 +7,15 @@ ''' from re import sub -from typing import List -def readfile(file: str) -> List[str]: - ret: List[str] = [] +def readfile(file: str) -> list[str]: + ret: list[str] = [] with open(file, "r") as opened: for line in opened: ret.append(line) return ret -def writefile(file: str, lines: List[str]): +def writefile(file: str, lines: list[str]): with open(file, "w") as opened: opened.writelines(lines) @@ -30,7 +29,7 @@ def insert_after(file: str, search: str, insert: str): writefile(file, lines) def replace_line(file: str, search: str, replace: str): - lines: List[str] = [] + lines: list[str] = [] for line in readfile(file): lines.append(sub(search, replace, line)) writefile(file, lines) diff --git a/src/scenarios/shared/crossgen.py b/src/scenarios/shared/crossgen.py index d3c693d244d..fed6dfd8c27 100644 --- a/src/scenarios/shared/crossgen.py +++ b/src/scenarios/shared/crossgen.py @@ -7,7 +7,7 @@ from logging import getLogger from argparse import ArgumentParser -from typing import Any, List, Optional +from typing import Any, Optional from shared import const class CrossgenArguments: @@ -123,7 +123,7 @@ def parse_crossgen2_args(self, args: Any): getLogger().error("Please specify either --single or --composite ") sys.exit(1) - def get_crossgen_command_line(self) -> List[str]: + def get_crossgen_command_line(self) -> list[str]: "Returns the computed crossgen command line arguments" filename, ext = os.path.splitext(self.singlefile) outputdir = os.path.join(os.getcwd(), const.CROSSGENDIR) diff --git a/src/scenarios/shared/mauisharedpython.py b/src/scenarios/shared/mauisharedpython.py index 0c889931430..bd90b9029e9 100644 --- a/src/scenarios/shared/mauisharedpython.py +++ b/src/scenarios/shared/mauisharedpython.py @@ -55,7 +55,7 @@ def generate_maui_rollback_dict(): dependencies = root.findall(".//Dependency[@Name]") for rollback_name, xml_name in rollback_name_to_xml_name_mappings.items(): for dependency in dependencies: - if dependency.get("Name").startswith(xml_name): # type: ignore we know Name is present + if dependency.attrib['Name'].startswith(xml_name): workload_version = dependency.get("Version") if workload_version is None: raise ValueError(f"Unable to find {xml_name} with proper version in the provided xml file") diff --git a/src/scenarios/shared/precommands.py 
b/src/scenarios/shared/precommands.py index 952a0e62cc3..503fe107320 100644 --- a/src/scenarios/shared/precommands.py +++ b/src/scenarios/shared/precommands.py @@ -8,7 +8,7 @@ import subprocess from logging import getLogger from argparse import ArgumentParser -from typing import List, Optional +from typing import Optional from dotnet import CSharpProject, CSharpProjFile from shared import const from shared.crossgen import CrossgenArguments @@ -122,7 +122,7 @@ def new(self, language: Optional[str] = None, no_https: bool = False, no_restore: bool = True, - extra_args: Optional[List[str]] = None): + extra_args: Optional[list[str]] = None): 'makes a new app with the given template' self.project = CSharpProject.new(template=template, output_dir=output_dir, @@ -197,7 +197,7 @@ def existing(self, projectdir: str, projectfile: str): self.project = CSharpProject(csproj, const.BINDIR) self._updateframework(csproj.file_name) - def execute(self, build_args: List[str] = []): + def execute(self, build_args: list[str] = []): 'Parses args and runs precommands' if self.operation == DEFAULT: pass @@ -271,7 +271,7 @@ def add_perflab_file(self, language_file_extension: str = 'cs'): staticpath = os.path.join(helixpayload(), "staticdeps") shutil.copyfile(os.path.join(staticpath, f"PerfLab.{language_file_extension}"), os.path.join(projpath, f"PerfLab.{language_file_extension}")) - def install_workload(self, workloadid: str, install_args: List[str] = ["--skip-manifest-update"]): + def install_workload(self, workloadid: str, install_args: list[str] = ["--skip-manifest-update"]): 'Installs the workload, if needed' if not self.has_workload: if self.readonly_dotnet: @@ -336,7 +336,7 @@ def _updateframework(self, projectfile: str): else: replace_line(projectfile, r'.*?', f'{self.framework}') - def _publish(self, configuration: str, framework: str, runtime_identifier: Optional[str] = None, output: Optional[str] = None, build_args: List[str] = []): + def _publish(self, configuration: str, framework: str, runtime_identifier: Optional[str] = None, output: Optional[str] = None, build_args: list[str] = []): self.project.publish(configuration, output or const.PUBDIR, True, @@ -347,12 +347,12 @@ def _publish(self, configuration: str, framework: str, runtime_identifier: Optio *['-bl:%s' % self.binlog] if self.binlog else [], *build_args) - def _restore(self, restore_args: List[str] = ["/p:EnableWindowsTargeting=true"]): + def _restore(self, restore_args: list[str] = ["/p:EnableWindowsTargeting=true"]): self.project.restore(packages_path=get_packages_directory(), verbose=True, args=(['-bl:%s-restore.binlog' % self.binlog] if self.binlog else []) + restore_args) - def _build(self, configuration: str, framework: str, output: Optional[str] = None, build_args: List[str] = []): + def _build(self, configuration: str, framework: str, output: Optional[str] = None, build_args: list[str] = []): self.project.build(configuration, True, get_packages_directory(), diff --git a/src/scenarios/shared/testtraits.py b/src/scenarios/shared/testtraits.py index 8c6dab8a366..3f2acef0fb9 100644 --- a/src/scenarios/shared/testtraits.py +++ b/src/scenarios/shared/testtraits.py @@ -1,3 +1,4 @@ +from typing import Any from shared.util import const # These are the kinds of scenarios we run. 
Default here indicates whether ALL @@ -62,7 +63,7 @@ def __init__(self, **kwargs): raise Exception("exename cannot be empty") # add traits if not present or overwrite existing traits if overwrite=True - def add_traits(self, overwrite=True, **kwargs): + def add_traits(self, overwrite=True, **kwargs: Any): for keyword in kwargs: if not self.is_valid_trait(keyword): raise Exception("%s is not a valid trait." % keyword) diff --git a/src/scenarios/shared/versionmanager.py b/src/scenarios/shared/versionmanager.py index 3371a1af345..d4120ffa5fb 100644 --- a/src/scenarios/shared/versionmanager.py +++ b/src/scenarios/shared/versionmanager.py @@ -5,10 +5,9 @@ import os import subprocess from performance.logger import getLogger -from typing import Dict from datetime import datetime -def versions_write_json(versiondict: Dict[str, str], outputfile: str = 'versions.json'): +def versions_write_json(versiondict: dict[str, str], outputfile: str = 'versions.json'): with open(outputfile, 'w', encoding='utf-8') as file: json.dump(versiondict, file) @@ -16,7 +15,7 @@ def versions_read_json(inputfile: str = 'versions.json'): with open(inputfile, 'r', encoding='utf-8') as file: return json.load(file) -def versions_write_env(versiondict: Dict[str, str]): +def versions_write_env(versiondict: dict[str, str]): for key, value in versiondict.items(): os.environ[key.upper()] = value # Windows automatically converts environment variables to uppercase, match this behavior everywhere From 0d5706f06aad1d62a495de226d393cabe8370e2c Mon Sep 17 00:00:00 2001 From: Cameron Aavik Date: Fri, 26 Sep 2025 12:31:05 +1000 Subject: [PATCH 2/6] Only import typing_extensions when type checking --- scripts/performance/tracer.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/performance/tracer.py b/scripts/performance/tracer.py index 0628f8ce328..a1ad254f33a 100644 --- a/scripts/performance/tracer.py +++ b/scripts/performance/tracer.py @@ -1,7 +1,6 @@ import functools from logging import getLogger from typing import TYPE_CHECKING, Callable, Optional, TypeVar -from typing_extensions import ParamSpec class TracingStateManager: '''A class to manage the state of tracing.''' @@ -60,7 +59,18 @@ def is_console_exporter_enabled() -> bool: '''Return whether the console exporter has been enabled.''' return tracing_state_manager.get_console_exporter_enabled() -P = ParamSpec("P") +# ParamSpec was added in Python 3.10, so we need to use typing_extensions for older versions. +# But, to avoid needing to install this to run the script, we define a placeholder if not type checking. 
+if TYPE_CHECKING: + from typing_extensions import ParamSpec + P = ParamSpec("P") +else: + from typing import Any + class _ParamSpecPlaceholder: + args: Any = object() + kwargs: Any = object() + P = _ParamSpecPlaceholder() + R = TypeVar("R") class AwareTracer: """ From bfd7eba6e8ba53eef23868180db60b3716d02b1f Mon Sep 17 00:00:00 2001 From: Cameron Aavik Date: Fri, 26 Sep 2025 13:20:13 +1000 Subject: [PATCH 3/6] Fix dotnet version regex --- scripts/dotnet.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/dotnet.py b/scripts/dotnet.py index d6671a3de0c..e12e1163fd6 100755 --- a/scripts/dotnet.py +++ b/scripts/dotnet.py @@ -101,9 +101,9 @@ def get_target_framework_monikers(frameworks: list[str]) -> list[str]: # ['net6.0', 'nativeaot6.0'] should become ['net6.0'] return list(set(monikers)) -_VERSION_RE = re.compile(r"\d+\.\d+\.\d+$") +_VERSION_RE = re.compile(r'^\d+\.\d+\.\d+') def version_type(value: str) -> str: - if not _VERSION_RE.fullmatch(value): + if not _VERSION_RE.search(value): raise ArgumentTypeError(f'Version "{value}" is in the wrong format') return value From ab28ff89d2aabac3d13dac53d471f27efeb01289 Mon Sep 17 00:00:00 2001 From: Cameron Aavik Date: Wed, 1 Oct 2025 05:45:50 +1000 Subject: [PATCH 4/6] Address PR feedback --- scripts/ci_setup.py | 5 ++--- scripts/performance/tracer.py | 4 ---- scripts/run_performance_job.py | 1 + 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/scripts/ci_setup.py b/scripts/ci_setup.py index 18bea0d5b4e..18952438226 100644 --- a/scripts/ci_setup.py +++ b/scripts/ci_setup.py @@ -344,8 +344,7 @@ def main(args: CiSetupArgs): # if repository is not set, then we are doing a sdk in performance repo run # if repository is set, user needs to supply the commit_sha - use_core_sdk = args.repository is None - if not ((args.commit_sha is None) == use_core_sdk): + if not ((args.commit_sha is None) == (args.repository is None)): raise ValueError('Either both commit_sha and repository should be set or neither') # for CI pipelines, use the agent OS @@ -445,7 +444,7 @@ def main(args: CiSetupArgs): if not framework.startswith('net4'): target_framework_moniker = dotnet.get_target_framework_moniker(framework) dotnet_version = dotnet.get_dotnet_version_precise(target_framework_moniker, args.cli) if args.dotnet_versions == [] else args.dotnet_versions[0] - commit_sha = dotnet.get_dotnet_sdk(target_framework_moniker, args.cli) if use_core_sdk else args.commit_sha + commit_sha = dotnet.get_dotnet_sdk(target_framework_moniker, args.cli) if args.repository is None else args.commit_sha assert commit_sha is not None # verified at start of main diff --git a/scripts/performance/tracer.py b/scripts/performance/tracer.py index a1ad254f33a..709e9cf301d 100644 --- a/scripts/performance/tracer.py +++ b/scripts/performance/tracer.py @@ -102,10 +102,6 @@ def start_as_current_span(self, name: str) -> Callable[[Callable[P, R]], Callabl The result of executing the decorated function. """ def decorator(func: Callable[P, R]) -> Callable[P, R]: - # If no tracer, just return the original function unchanged (best for typing & introspection). 
- if self._tracer is None: - return func - @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> R: if self._tracer is None: diff --git a/scripts/run_performance_job.py b/scripts/run_performance_job.py index 07bbde45d7e..d6bbf9f145f 100644 --- a/scripts/run_performance_job.py +++ b/scripts/run_performance_job.py @@ -725,6 +725,7 @@ def run_performance_job(args: RunPerformanceJobArgs): v8_version = get_msbuild_property(args.browser_versions_props_path, "linux_V8Version") if not v8_version: raise Exception("Unable to find v8 version in BrowserVersions.props") + v8_version = ".".join(v8_version.split(".")[:3]) if args.javascript_engine_path is None: args.javascript_engine_path = f"/home/helixbot/.jsvu/bin/v8-{v8_version}" From d0e9e0c1955149eda574c96a7eb122590318f656 Mon Sep 17 00:00:00 2001 From: Cameron Aavik Date: Sat, 4 Oct 2025 12:17:19 +1000 Subject: [PATCH 5/6] Revert get_msbuild_property, add pyrightconfig.json --- .vscode/settings.json | 13 +---- pyrightconfig.json | 93 ++++++++++++++++++++++++++++++++ scripts/benchmarks_local.py | 2 +- scripts/build_runtime_payload.py | 3 +- scripts/performance/common.py | 10 ++-- scripts/performance/tracer.py | 3 +- scripts/run_performance_job.py | 29 ++++++---- 7 files changed, 123 insertions(+), 30 deletions(-) create mode 100644 pyrightconfig.json diff --git a/.vscode/settings.json b/.vscode/settings.json index 537ce8cddcf..d4e92821931 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -15,16 +15,5 @@ "**/artifacts/**": true, "**/weblarge3.0/**": true, }, - "python.analysis.typeCheckingMode": "strict", - "python.analysis.diagnosticMode": "workspace", - "python.analysis.exclude": [ - "**/__pycache__/**", - "**/artifacts/**", - "**/.venv/**", - "**/eng/common/**", - "**/weblarge3.0/**" - ], - "python.analysis.extraPaths": [ - "./scripts" - ] + "python.analysis.diagnosticMode": "workspace" } \ No newline at end of file diff --git a/pyrightconfig.json b/pyrightconfig.json new file mode 100644 index 00000000000..ec342f2e1a8 --- /dev/null +++ b/pyrightconfig.json @@ -0,0 +1,93 @@ +{ + "typeCheckingMode": "strict", + "include": ["src/scenarios", "scripts"], + "exclude": [ + "**/__pycache__/**", + "**/artifacts/**", + "**/.venv/**", + "**/eng/common/**", + "**/weblarge3.0/**" + ], + "pythonVersion": "3.9", + "pythonPlatform": "All", + "deprecateTypingAliases": true, + "reportImplicitOverride": "error", + "reportImportCycles": "error", + "reportPropertyTypeMismatch": "error", + "reportUnnecessaryTypeIgnoreComment": "error", + "reportUnreachable": "information", + // Ensure that src/scenarios and scripts are on the path by default for all execution environments + "extraPaths": ["./src/scenarios", "./scripts"], + "executionEnvironments": [ + // scripts/tests run relative to the root of the repository + { + "root": "./scripts/tests", + "extraPaths": [ "." ] + }, + // Set extra paths to empty as we don't want to accidentally reference src/scenarios from here + { + "root": "./scripts", + "extraPaths": [ ] + }, + // All of our scenarios are run by executing python files pre.py, test.py, and post.py from their own directory + // This means we need to mark each scenario folder as a separate execution environment so that pyright is able + // to properly resolve relative imports inside each scenario folder. 
+ { "root": "./src/scenarios/aspwebtemplate" }, + { "root": "./src/scenarios/bdnandroid" }, + { "root": "./src/scenarios/blazor" }, + { "root": "./src/scenarios/blazoraot" }, + { "root": "./src/scenarios/blazorlocalized" }, + { "root": "./src/scenarios/blazorminapp" }, + { "root": "./src/scenarios/blazorminappaot" }, + { "root": "./src/scenarios/blazorpizza" }, + { "root": "./src/scenarios/blazorpizzaaot" }, + { "root": "./src/scenarios/blazorserverinnerloop" }, + { "root": "./src/scenarios/blazorservertemplate" }, + { "root": "./src/scenarios/blazorwasmdotnetwatch" }, + { "root": "./src/scenarios/blazorwasminnerloop" }, + { "root": "./src/scenarios/classlibtemplate" }, + { "root": "./src/scenarios/crossgen" }, + { "root": "./src/scenarios/crossgen2" }, + { "root": "./src/scenarios/emptyconsolenativeaot" }, + { "root": "./src/scenarios/emptyconsoletemplate" }, + { "root": "./src/scenarios/emptyconsoletemplateinnerloop" }, + { "root": "./src/scenarios/emptyconsoletemplateinnerloopmsbuild" }, + { "root": "./src/scenarios/emptyfsconsoletemplate" }, + { "root": "./src/scenarios/emptyvbconsoletemplate" }, + { "root": "./src/scenarios/fsharpcompilerservice" }, + { "root": "./src/scenarios/genericandroidstartup" }, + { "root": "./src/scenarios/grpctemplate" }, + { "root": "./src/scenarios/helloandroid" }, + { "root": "./src/scenarios/helloios" }, + { "root": "./src/scenarios/mauiandroid" }, + { "root": "./src/scenarios/mauiblazorandroid" }, + { "root": "./src/scenarios/mauiblazordesktop" }, + { "root": "./src/scenarios/mauiblazorios" }, + { "root": "./src/scenarios/mauidesktop" }, + { "root": "./src/scenarios/mauiios" }, + { "root": "./src/scenarios/mauimaccatalyst" }, + { "root": "./src/scenarios/mauisamplecontentandroid" }, + { "root": "./src/scenarios/mstesttemplate" }, + { "root": "./src/scenarios/mvcapptemplate" }, + { "root": "./src/scenarios/mvcdotnetwatch" }, + { "root": "./src/scenarios/mvcinnerloop" }, + { "root": "./src/scenarios/netandroid" }, + { "root": "./src/scenarios/netios" }, + { "root": "./src/scenarios/netstandard2.0" }, + { "root": "./src/scenarios/nunittesttemplate" }, + { "root": "./src/scenarios/paintdotnet" }, + { "root": "./src/scenarios/razorclasslibtemplate" }, + { "root": "./src/scenarios/staticconsoletemplate" }, + { "root": "./src/scenarios/staticfsconsoletemplate" }, + { "root": "./src/scenarios/staticvbconsoletemplate" }, + { "root": "./src/scenarios/staticwinformstemplate" }, + { "root": "./src/scenarios/webapitemplate" }, + { "root": "./src/scenarios/webapptemplate" }, + { "root": "./src/scenarios/weblarge3.0" }, + { "root": "./src/scenarios/windowsforms" }, + { "root": "./src/scenarios/windowsformslarge" }, + { "root": "./src/scenarios/wpf" }, + { "root": "./src/scenarios/wpfsfc" }, + { "root": "./src/scenarios/xunittesttemplate" } + ] +} \ No newline at end of file diff --git a/scripts/benchmarks_local.py b/scripts/benchmarks_local.py index 747467762b9..61a860a556c 100644 --- a/scripts/benchmarks_local.py +++ b/scripts/benchmarks_local.py @@ -53,7 +53,7 @@ def is_running_as_admin(parsed_args: Namespace) -> bool: if is_windows(parsed_args): import ctypes return ctypes.windll.shell32.IsUserAnAdmin() - return os.getuid() == 0 # type: ignore We know that os.getuid() is a method on Unix-like systems, ignore the pylance unknown type error for getuid. 
+ return os.getuid() == 0 def kill_dotnet_processes(parsed_args: Namespace): if not parsed_args.kill_dotnet_processes: diff --git a/scripts/build_runtime_payload.py b/scripts/build_runtime_payload.py index 279bc83b3df..1217d6deb57 100644 --- a/scripts/build_runtime_payload.py +++ b/scripts/build_runtime_payload.py @@ -2,12 +2,13 @@ This file contains helper methods for turning build artifacts from the build step of our CI pipeline and the Build Caching Service into a payload that can be used locally or in Helix jobs. """ +from collections.abc import Iterable from logging import getLogger import os from pathlib import Path import shutil import tarfile -from typing import Optional, Iterable +from typing import Optional import zipfile from performance.common import RunCommand, iswin diff --git a/scripts/performance/common.py b/scripts/performance/common.py index 0c27dde045c..0be3b93fc2a 100644 --- a/scripts/performance/common.py +++ b/scripts/performance/common.py @@ -2,6 +2,7 @@ Common functionality used by the repository scripts. ''' +from collections.abc import Callable from contextlib import contextmanager from logging import getLogger from os import environ @@ -11,6 +12,7 @@ from subprocess import list2cmdline from subprocess import PIPE, STDOUT, DEVNULL from subprocess import Popen +from typing import Any, Optional, TypeVar from io import StringIO from platform import machine @@ -18,7 +20,6 @@ import sys import time import base64 -from typing import Any, Callable, Optional, TypeVar def get_machine_architecture(): @@ -250,7 +251,7 @@ def set_environment_variable(name: str, value: str, save_to_pipeline: bool = Tru __write_pipeline_variable(name, value) os.environ[name] = value -def run_msbuild_command(args: list[str], verbose: bool=True, warn_as_error: bool=True, perf_repo_dir: Optional[str] = None) -> str: +def run_msbuild_command(args: list[str], verbose: bool=True, warn_as_error: bool=True, perf_repo_dir: Optional[str] = None) -> int: if perf_repo_dir is None: perf_repo_dir = get_repo_root_path() msbuild_dir = os.path.join(perf_repo_dir, 'eng', 'common') @@ -267,10 +268,7 @@ def run_msbuild_command(args: list[str], verbose: bool=True, warn_as_error: bool cmdline += ["--warnaserror", "false"] cmdline += args - return RunCommand(cmdline, verbose=verbose).run_and_get_stdout().strip() - -def get_msbuild_property(props_path: str, property_name: str) -> str: - return run_msbuild_command([props_path, f"/getProperty:{property_name}"]) + return RunCommand(cmdline, verbose=verbose).run() class RunCommand: ''' diff --git a/scripts/performance/tracer.py b/scripts/performance/tracer.py index 709e9cf301d..29e87f8f268 100644 --- a/scripts/performance/tracer.py +++ b/scripts/performance/tracer.py @@ -1,6 +1,7 @@ import functools +from collections.abc import Callable from logging import getLogger -from typing import TYPE_CHECKING, Callable, Optional, TypeVar +from typing import TYPE_CHECKING, Optional, TypeVar class TracingStateManager: '''A class to manage the state of tracing.''' diff --git a/scripts/run_performance_job.py b/scripts/run_performance_job.py index 02ef6d8c5a7..ec93ec68e24 100644 --- a/scripts/run_performance_job.py +++ b/scripts/run_performance_job.py @@ -1,4 +1,5 @@ from logging import getLogger +import re from dataclasses import dataclass, field from datetime import timedelta from glob import glob @@ -15,7 +16,7 @@ from build_runtime_payload import * import ci_setup -from performance.common import RunCommand, get_msbuild_property, set_environment_variable +from performance.common 
import RunCommand, set_environment_variable from performance.logger import setup_loggers from send_to_helix import PerfSendToHelixArgs, perf_send_to_helix @@ -333,8 +334,7 @@ def get_bdn_arguments( javascript_engine_path: Optional[str] = None, product_version: Optional[str] = None, corerun_payload_dir: Optional[str] = None, - extra_bdn_args: Optional[str] = None): - + extra_bdn_args: Optional[str] = None) -> list[str]: bdn_arguments = ["--anyCategories", run_categories] if affinity is not None and not "0": @@ -381,6 +381,7 @@ def get_bdn_arguments( if javascript_engine == "v8": wasm_args += ["--module"] + assert javascript_engine_path is not None bdn_arguments += [ "--wasmEngine", javascript_engine_path, f"\\\"--wasmArgs={' '.join(wasm_args)}\\\"", @@ -691,8 +692,13 @@ def run_performance_job(args: RunPerformanceJobArgs): raise Exception("Please provide either the product version, a path to Versions.props, or a runtime repo directory") args.versions_props_path = os.path.join(args.runtime_repo_dir, "eng", "Versions.props") - product_version = get_msbuild_property(args.versions_props_path, "ProductVersion") - if not product_version: + with open(args.versions_props_path) as f: + for line in f: + match = re.search(r"ProductVersion>([^<]*)<", line) + if match: + product_version = match.group(1) + break + if product_version is None: raise Exception("Unable to find ProductVersion in Versions.props") mono_dotnet_path = os.path.join(payload_dir, "dotnet-mono") @@ -725,10 +731,15 @@ def run_performance_job(args: RunPerformanceJobArgs): raise Exception("BrowserVersions.props must be present for wasm runs") args.browser_versions_props_path = os.path.join(args.runtime_repo_dir, "eng", "testing", "BrowserVersions.props") - v8_version = get_msbuild_property(args.browser_versions_props_path, "linux_V8Version") - if not v8_version: - raise Exception("Unable to find v8 version in BrowserVersions.props") - v8_version = ".".join(v8_version.split(".")[:3]) + with open(args.browser_versions_props_path) as f: + for line in f: + match = re.search(r"linux_V8Version>([^<]*)<", line) + if match: + v8_version = match.group(1) + v8_version = ".".join(v8_version.split(".")[:3]) + break + else: + raise Exception("Unable to find v8 version in BrowserVersions.props") if args.javascript_engine_path is None: args.javascript_engine_path = f"/home/helixbot/.jsvu/bin/v8-{v8_version}" From 7263690ee1f1793261ad573bb20dc1602d46152d Mon Sep 17 00:00:00 2001 From: Cameron Aavik Date: Sat, 4 Oct 2025 15:11:54 +1000 Subject: [PATCH 6/6] Fix tracer errors --- pyrightconfig.json | 1 + scripts/performance/logger.py | 2 +- scripts/performance/tracer.py | 33 ++++++++++----------------------- 3 files changed, 12 insertions(+), 24 deletions(-) diff --git a/pyrightconfig.json b/pyrightconfig.json index ec342f2e1a8..750382e86ba 100644 --- a/pyrightconfig.json +++ b/pyrightconfig.json @@ -11,6 +11,7 @@ "pythonVersion": "3.9", "pythonPlatform": "All", "deprecateTypingAliases": true, + "reportMissingTypeStubs": "none", "reportImplicitOverride": "error", "reportImportCycles": "error", "reportPropertyTypeMismatch": "error", diff --git a/scripts/performance/logger.py b/scripts/performance/logger.py index b5f03c44823..9bec4df5d21 100644 --- a/scripts/performance/logger.py +++ b/scripts/performance/logger.py @@ -41,7 +41,7 @@ def __initialize(verbose: bool): if enable_open_telemetry_logger: try: - from opentelemetry._logs import set_logger_provider # pyright: ignore[reportMissingTypeStubs] + from opentelemetry._logs import set_logger_provider from 
opentelemetry.sdk._logs import LoggerProvider, LoggingHandler from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, ConsoleLogExporter diff --git a/scripts/performance/tracer.py b/scripts/performance/tracer.py index 29e87f8f268..ce056b6cb1d 100644 --- a/scripts/performance/tracer.py +++ b/scripts/performance/tracer.py @@ -1,7 +1,7 @@ import functools from collections.abc import Callable from logging import getLogger -from typing import TYPE_CHECKING, Optional, TypeVar +from typing import TYPE_CHECKING, Any, Optional, TypeVar, cast class TracingStateManager: '''A class to manage the state of tracing.''' @@ -60,19 +60,8 @@ def is_console_exporter_enabled() -> bool: '''Return whether the console exporter has been enabled.''' return tracing_state_manager.get_console_exporter_enabled() -# ParamSpec was added in Python 3.10, so we need to use typing_extensions for older versions. -# But, to avoid needing to install this to run the script, we define a placeholder if not type checking. -if TYPE_CHECKING: - from typing_extensions import ParamSpec - P = ParamSpec("P") -else: - from typing import Any - class _ParamSpecPlaceholder: - args: Any = object() - kwargs: Any = object() - P = _ParamSpecPlaceholder() - -R = TypeVar("R") +_F = TypeVar("_F", bound=Callable[..., Any]) + class AwareTracer: """ A OpenTelemetry aware tracer implementation that is used as a wrapper for OpenTelemetry calls where OpenTelemetry is not guaranteed be installed. @@ -80,7 +69,7 @@ class AwareTracer: """ def __init__(self, name: str = "dotnet.performance") -> None: if TYPE_CHECKING: - from opentelemetry.trace import Tracer # pyright: ignore[reportMissingTypeStubs] + from opentelemetry.trace import Tracer self._tracer: Optional[Tracer] try: @@ -90,28 +79,26 @@ def __init__(self, name: str = "dotnet.performance") -> None: else: self._tracer = trace.get_tracer(name) - def start_as_current_span(self, name: str) -> Callable[[Callable[P, R]], Callable[P, R]]: + def start_as_current_span(self, name: str) -> Callable[[_F], _F]: """ Decorator that starts a new span as the current span if OpenTelemetry is imported. If OpenTelemetry is not imported, the function is executed without starting a new span. Args: - *top_args: Variable length argument list that will be passed to OpenTelemetry Tracer.start_as_current_span. - **top_kwargs: Arbitrary keyword arguments. + name: The name of the span to start. Returns: - The result of executing the decorated function. + A decorator that preserves the original function's signature. """ - def decorator(func: Callable[P, R]) -> Callable[P, R]: + def decorator(func: _F) -> _F: @functools.wraps(func) - def wrapped(*args: P.args, **kwargs: P.kwargs) -> R: + def wrapped(*args: Any, **kwargs: Any) -> Any: if self._tracer is None: return func(*args, **kwargs) with self._tracer.start_as_current_span(name): return func(*args, **kwargs) - return wrapped + return cast(_F, wrapped) return decorator - \ No newline at end of file
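
Taken together, the tracer patches converge on one idea: import OpenTelemetry lazily, and wrap functions with a decorator that keeps their signatures intact under strict pyright while degrading to a plain call when the dependency is missing. The minimal sketch below illustrates that pattern in isolation. It is not code from the repository; names such as traced and _get_optional_tracer are invented for the example, and the only external surface assumed is the public opentelemetry-api (trace.get_tracer and the span context manager) when that package happens to be installed.

# Illustrative sketch only, not part of the patch series above.
import functools
from collections.abc import Callable
from typing import Any, Optional, TypeVar, cast

_F = TypeVar("_F", bound=Callable[..., Any])

def _get_optional_tracer(name: str = "dotnet.performance") -> Optional[Any]:
    # Import lazily so the script still runs when OpenTelemetry is absent.
    try:
        from opentelemetry import trace
    except ImportError:
        return None
    return trace.get_tracer(name)

def traced(span_name: str) -> Callable[[_F], _F]:
    """Run the function inside a span when OpenTelemetry is available; otherwise call it unchanged."""
    tracer = _get_optional_tracer()

    def decorator(func: _F) -> _F:
        @functools.wraps(func)
        def wrapped(*args: Any, **kwargs: Any) -> Any:
            if tracer is None:
                return func(*args, **kwargs)
            with tracer.start_as_current_span(span_name):
                return func(*args, **kwargs)
        # cast keeps the decorated function's original signature for callers.
        return cast(_F, wrapped)
    return decorator

@traced("example_work")
def do_work(x: int) -> int:
    return x * 2

if __name__ == "__main__":
    print(do_work(21))  # prints 42 with or without OpenTelemetry installed

Binding the TypeVar to Callable[..., Any] and returning cast(_F, wrapped) gives up ParamSpec's per-parameter precision, but it works on Python 3.9 without a runtime typing_extensions dependency, which is the same trade-off the final patch settles on.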