diff --git a/BENCHMARKS_FORMAT.md b/BENCHMARKS_FORMAT.md
new file mode 100644
index 00000000..ee4dd558
--- /dev/null
+++ b/BENCHMARKS_FORMAT.md
@@ -0,0 +1,335 @@
+# The pyperformance File Formats
+
+`pyperformance` uses two file formats to identify benchmarks:
+
+* manifest - a set of benchmarks
+* metadata - a single benchmark
+
+For each benchmark, there are two required files and several optional
+ones. Those files are expected to be in a specific directory structure
+(unless customized in the metadata).
+
+The structure (see below) is such that it's easy to maintain
+a benchmark (or set of benchmarks) on GitHub and distribute it on PyPI.
+It also simplifies publishing a Python project's benchmarks.
+The alternative is pointing people at a repo.
+
+Benchmarks can inherit metadata from other metadata files.
+This is useful for keeping common metadata for a set of benchmarks
+(e.g. "version") in one file. Likewise, benchmarks for a Python
+project can inherit metadata from the project's pyproject.toml.
+
+Sometimes a benchmark will have one or more variants that run using
+the same script. Variants like this are supported by `pyperformance`
+without requiring much extra effort.
+
+
+## Benchmark Directory Structure
+
+Normally a benchmark is structured like this:
+
+```
+bm_NAME/
+    data/             # if needed
+    requirements.txt  # lock file, if any
+    pyproject.toml
+    run_benchmark.py
+```
+
+(Note the "bm\_" prefix on the directory name.)
+
+"pyproject.toml" holds the metadata. "run_benchmark.py" holds
+the actual benchmark code. Both are necessary.
+
+`pyperformance` treats the metadata file as the fundamental source of
+information about a benchmark. A manifest for a set of benchmarks is
+effectively a mapping of names to metadata files. So a metadata file
+is essential. It can be located anywhere on disk. However, if it
+isn't located in the structure described above then the metadata must
+identify where to find the other files.
+
+Other than that, only a benchmark script (e.g. "run_benchmark.py" above)
+is required. All other files are optional.
+
+When a benchmark has variants, each has its own metadata file next to
+the normal "pyproject.toml", named "bm_NAME.toml". (Note the "bm\_"
+prefix.) The format of variant metadata files is exactly the same.
+`pyperformance` treats them the same, except that the sibling
+"pyproject.toml" is inherited by default.
+
+
+## Manifest Files
+
+A manifest file identifies a set of benchmarks, as well as (optionally)
+how they should be grouped. `pyperformance` uses the manifest to
+determine which benchmarks are available to run (and thus which to run
+by default).
+
+A manifest normally looks like this:
+
+```
+[benchmarks]
+
+name	metafile
+bench1	somedir/bm_bench1/pyproject.toml
+bench2	somedir/pyproject.toml
+bench3	../anotherdir
+```
+
+The "benchmarks" section is a table with rows of tab-separated values.
+The "name" value is how `pyperformance` will identify the benchmark.
+The "metafile" value is where `pyperformance` will look for the
+benchmark's metadata. If a metafile is a directory then it looks
+for "pyproject.toml" in that directory.
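+
+As an illustration only (a simplified sketch, not `pyperformance`'s
+actual implementation; the function name is hypothetical), resolving a
+manifest's "metafile" entry works roughly like this in Python:
+
+```
+import os.path
+
+def resolve_metafile(manifest_path, metafile):
+    # A relative metafile path is resolved against the manifest's directory.
+    root = os.path.dirname(manifest_path)
+    path = metafile if os.path.isabs(metafile) else os.path.join(root, metafile)
+    # A directory entry means "use the pyproject.toml in that directory".
+    if os.path.isdir(path):
+        path = os.path.join(path, "pyproject.toml")
+    return path
+```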
+
+
+### Benchmark Groups
+
+The other sections in the manifest file relate to grouping:
+
+```
+[benchmarks]
+
+name	metafile
+bench1	somedir/bm_bench1
+bench2	somedir/bm_bench2
+bench3	anotherdir/mybench.toml
+
+[groups]
+tag1
+tag2
+
+[group default]
+bench2
+bench3
+
+[group tricky]
+bench2
+```
+
+The "groups" section specifies available groups that may be identified
+by benchmark tags (see about tags in the metadata section below). Any
+other group sections in the manifest are automatically added to the list
+of available groups.
+
+If no "default" group is specified then one is automatically added with
+all benchmarks from the "benchmarks" section in it. If there is no
+"groups" section and no individual group sections (other than "default")
+then the set of all tags of the known benchmarks is treated as "groups".
+A group named "all" is also automatically added which has all known
+benchmarks in it.
+
+Benchmarks can be excluded from a group by using a `-` (minus) prefix.
+Any benchmark already in the list (at that point) that matches will be
+dropped from the list. If the first entry in the section is an
+exclusion then all known benchmarks are first added to the list
+before the exclusion is applied.
+
+For example:
+
+```
+[benchmarks]
+
+name	metafile
+bench1	somedir/bm_bench1
+bench2	somedir/bm_bench2
+bench3	anotherdir/mybench.toml
+
+[group default]
+-bench1
+```
+
+This means by default only "bench2" and "bench3" are run.
+
+
+### Merging Manifests
+
+To combine manifests, use the `[includes]` section in the manifest:
+
+```
+[includes]
+project1/benchmarks/MANIFEST
+project2/benchmarks/MANIFEST
+<default>
+```
+
+Note that `<default>` is the same as including the manifest file
+for the default pyperformance benchmarks.
+
+
+### A Local Benchmark Suite
+
+Often a project will have more than one benchmark that it will treat
+as a suite. `pyperformance` handles this without any extra work.
+
+In the directory holding the manifest file put all the benchmarks. Then
+put `<local>` in the "metafile" column, like this:
+
+```
+[benchmarks]
+
+name	metafile
+bench1	<local>
+bench2	<local>
+bench3	<local>
+bench4	<local>
+bench5	<local>
+```
+
+It will look for `DIR/bm_NAME/pyproject.toml`.
+
+If there are also variants, identify the main benchmark
+in the "metafile" value, like this:
+
+```
+[benchmarks]
+
+name	metafile
+bench1	<local>
+bench2	<local>
+bench3	<local>
+variant1	<local:bench3>
+variant2	<local:bench3>
+```
+
+`pyperformance` will look for `DIR/bm_BASE/bm_NAME.toml`, where "BASE"
+is the part after "local:".
+
+
+### A Project's Benchmark Suite
+
+A Python project can identify its benchmark suite by putting the path
+to the manifest file in the project's top-level pyproject.toml.
+Additional manifests can be identified as well.
+
+```
+[tool.pyperformance]
+manifest = "..."
+manifests = ["...", "..."]
+```
+
+(Reminder: that is the pyproject.toml, not the manifest file.)
+
+
+## Benchmark Metadata Files
+
+A benchmark's metadata file (usually pyproject.toml) follows the format
+specified in [PEP 621](https://www.python.org/dev/peps/pep-0621) and
+[PEP 518](https://www.python.org/dev/peps/pep-0518). So there are two
+supported sections in the file: "project" and "tool.pyperformance".
+
+A typical metadata file will look something like this:
+
+```
+[project]
+version = "0.9.1"
+dependencies = ["pyperf"]
+dynamic = ["name"]
+
+[tool.pyperformance]
+name = "my_benchmark"
+```
+
+A highly detailed one might look like this:
+
+```
+[project]
+name = "pyperformance_bm_json_dumps"
+version = "0.9.1"
+description = "A benchmark for json.dumps()"
+requires-python = ">=3.8"
+dependencies = ["pyperf"]
+urls = {repository = "https://github.com/python/pyperformance"}
+dynamic = ["version"]
+
+[tool.pyperformance]
+name = "json_dumps"
+tags = "serialize"
+runscript = "bench.py"
+datadir = ".data-files/extras"
+extra_opts = ["--special"]
+```
+
+
+### Inheritance
+
+For one benchmark to inherit from another (or from common metadata),
+the "inherits" field is available:
+
+```
+[project]
+dependencies = ["pyperf"]
+dynamic = ["name", "version"]
+
+[tool.pyperformance]
+name = "my_benchmark"
+inherits = "../common.toml"
+```
+
+All values in either section of the inherited metadata are treated
+as defaults, on top of which the current metadata is applied. In the
+above example, for instance, a value for "version" in common.toml would
+be used here.
+
+If the "inherits" value is a directory (even for "..") then
+"base.toml" in that directory will be inherited.
+
+For variants, the base pyproject.toml is the default value for "inherits".
+
+
+### Inferred Values
+
+In some situations, omitted values will be inferred from other available
+data (even for required fields).
+
+* `project.name` <= `tool.pyperformance.name`
+* `project.*` <= inherited metadata (except for "name" and "dynamic")
+* `tool.pyperformance.name` <= metadata filename
+* `tool.pyperformance.*` <= inherited metadata (except for "name" and "inherits")
+
+When the name is inferred from the filename for a regularly structured
+benchmark, the "bm\_" prefix is removed from the benchmark's directory
+name. If it is a variant, that prefix is removed from the metadata
+filename, along with the ".toml" suffix.
+
+
+### The `[project]` Section
+
+| field                | type  | R | T | B | D |
+|----------------------|-------|---|---|---|---|
+| project.name         | str   | X | X |   |   |
+| project.version      | ver   | X |   | X | X |
+| project.dependencies | [str] |   |   | X |   |
+| project.dynamic      | [str] |   |   |   |   |
+
+"R": required
+"T": inferred from the tool section
+"B": inferred from the inherited metadata
+"D": for default benchmarks, inferred from pyperformance
+
+"dynamic" is required by PEP 621 for when a field will be filled in
+dynamically by the tool. This is especially important for required
+fields.
+
+All other PEP 621 fields are optional (e.g. `requires-python = ">=3.8"`,
+`urls = {repository = "https://github.com/..."}`).
+
+
+### The `[tool.pyperformance]` Section
+
+| field           | type  | R | B | F |
+|-----------------|-------|---|---|---|
+| tool.name       | str   | X |   | X |
+| tool.tags       | [str] |   | X |   |
+| tool.extra_opts | [str] |   | X |   |
+| tool.inherits   | file  |   |   |   |
+| tool.runscript  | file  |   | X |   |
+| tool.datadir    | file  |   | X |   |
+
+"R": required
+"B": inferred from the inherited metadata
+"F": inferred from filename
+
+* tags: optional list of names to group benchmarks
+* extra_opts: optional list of args to pass to `tool.runscript`
+* runscript: the benchmark script to use instead of "run_benchmark.py"
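+
+For reference, a benchmark script (whether "run_benchmark.py" or a
+custom `runscript`) is an ordinary pyperf script. A minimal,
+hypothetical sketch (the benchmark name and workload are placeholders):
+
+```
+import time
+
+import pyperf
+
+
+def bench(loops):
+    # Time the workload ourselves and return the elapsed time so that
+    # pyperf can calibrate the number of loops.
+    t0 = time.perf_counter()
+    for _ in range(loops):
+        sum(range(1000))  # placeholder workload
+    return time.perf_counter() - t0
+
+
+if __name__ == "__main__":
+    runner = pyperf.Runner()
+    runner.bench_time_func("my_benchmark", bench)
+```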
diff --git a/MANIFEST.in b/MANIFEST.in index 3bda47f9..46553807 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,6 +3,7 @@ include COPYING include MANIFEST.in include README.rst include TODO.rst +include requirements.in include requirements.txt include runtests.py include pyperformance @@ -10,3 +11,8 @@ include tox.ini include doc/*.rst doc/images/*.png doc/images/*.jpg include doc/conf.py doc/Makefile doc/make.bat + +include pyperformance/data-files/requirements.txt +include pyperformance/data-files/benchmarks/MANIFEST +include pyperformance/data-files/benchmarks/base.toml +recursive-include pyperformance/data-files/benchmarks/bm_*/* * diff --git a/benchmarks b/benchmarks new file mode 120000 index 00000000..5a5d7851 --- /dev/null +++ b/benchmarks @@ -0,0 +1 @@ +pyperformance/data-files/benchmarks \ No newline at end of file diff --git a/doc/benchmark.conf.sample b/doc/benchmark.conf.sample index 3f4da356..ebf377e8 100644 --- a/doc/benchmark.conf.sample +++ b/doc/benchmark.conf.sample @@ -64,6 +64,9 @@ install = True # Run "sudo python3 -m pyperf system tune" before running benchmarks? system_tune = True +# --manifest option for 'pyperformance run' +manifest = + # --benchmarks option for 'pyperformance run' benchmarks = diff --git a/pyperformance/__init__.py b/pyperformance/__init__.py index 81598dbc..e77c9201 100644 --- a/pyperformance/__init__.py +++ b/pyperformance/__init__.py @@ -1,2 +1,14 @@ +import os.path + + VERSION = (1, 0, 3) __version__ = '.'.join(map(str, VERSION)) + + +PKG_ROOT = os.path.dirname(__file__) +DATA_DIR = os.path.join(PKG_ROOT, 'data-files') + + +def is_installed(): + parent = os.path.dirname(PKG_ROOT) + return os.path.exists(os.path.join(parent, 'setup.py')) diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py new file mode 100644 index 00000000..cd51dfe6 --- /dev/null +++ b/pyperformance/_benchmark.py @@ -0,0 +1,280 @@ + +__all__ = [ + 'BenchmarkSpec', + 'Benchmark' + 'check_name', + 'parse_benchmark', +] + + +from collections import namedtuple +import os +import os.path +import sys + +import pyperf + +from . 
import _utils, _benchmark_metadata + + +def check_name(name): + _utils.check_name('_' + name) + + +def parse_benchmark(entry, *, fail=True): + name = entry + version = None + origin = None + metafile = None + + if not f'_{name}'.isidentifier(): + if not fail: + return None + raise ValueError(f'unsupported benchmark name in {entry!r}') + + bench = BenchmarkSpec(name, version, origin) + return bench, metafile + + +class BenchmarkSpec(namedtuple('BenchmarkSpec', 'name version origin')): + __slots__ = () + + @classmethod + def from_raw(cls, raw): + if isinstance(raw, BenchmarkSpec): + return raw, None + elif isinstance(raw, str): + return parse_benchmark(raw) + else: + raise ValueError(f'unsupported raw spec {raw!r}') + + def __new__(cls, name, version=None, origin=None): + self = super().__new__(cls, name, version or None, origin or None) + return self + + +class Benchmark: + + _metadata = None + + def __init__(self, spec, metafile): + spec, _metafile = BenchmarkSpec.from_raw(spec) + if not metafile: + if not _metafile: + raise ValueError(f'missing metafile for {spec!r}') + metafile = _metafile + + self.spec = spec + self.metafile = metafile + + def __repr__(self): + return f'{type(self).__name__}(spec={self.spec!r}, metafile={self.metafile!r})' + + def __hash__(self): + return hash(self.spec) + + def __eq__(self, other): + try: + other_spec = other.spec + except AttributeError: + return NotImplemented + return self.spec == other_spec + + def __gt__(self, other): + try: + other_spec = other.spec + except AttributeError: + return NotImplemented + return self.spec > other_spec + + # __getattr__() gets weird when AttributeError comes out of + # properties so we spell out all the aliased attributes. + + @property + def name(self): + return self.spec.name + + @property + def version(self): + version = self.spec.version + if version is None: + version = self._get_metadata_value('version', None) + return version + + @property + def origin(self): + return self.spec.origin + + def _get_rootdir(self): + try: + return self._rootdir + except AttributeError: + script = self.runscript + self._rootdir = os.path.dirname(script) if script else None + return self._rootdir + + def _init_metadata(self): + #assert self._metadata is None + defaults = { + 'name': self.spec.name, + 'version': self.spec.version, + } + self._metadata, _ = _benchmark_metadata.load_metadata( + self.metafile, + defaults, + ) + + def _get_metadata_value(self, key, default): + try: + return self._metadata[key] + except TypeError: + if self._metadata is not None: + raise # re-raise + self._init_metadata() + except KeyError: + pass + return self._metadata.setdefault(key, default) + + @property + def tags(self): + return self._get_metadata_value('tags', ()) + + @property + def datadir(self): + return self._get_metadata_value('datadir', None) + + @property + def requirements_lockfile(self): + try: + return self._lockfile + except AttributeError: + lockfile = self._get_metadata_value('requirements_lockfile', None) + if not lockfile: + rootdir = self._get_rootdir() + if rootdir: + lockfile = os.path.join(rootdir, 'requirements.txt') + self._lockfile = lockfile + return self._lockfile + + @property + def runscript(self): + return self._get_metadata_value('runscript', None) + + @property + def extra_opts(self): + return self._get_metadata_value('extra_opts', ()) + + # Other metadata keys: + # * base + # * python + # * dependencies + # * requirements + + def run(self, python, runid=None, pyperf_opts=None, *, + venv=None, + verbose=False, + ): + if 
venv and python == sys.executable: + python = venv.get_python_program() + + if not runid: + from ..run import get_run_id + runid = get_run_id(python, self) + + runscript = self.runscript + bench = _run_perf_script( + python, + runscript, + runid, + extra_opts=self.extra_opts, + pyperf_opts=pyperf_opts, + verbose=verbose, + ) + + return bench + + +####################################### +# internal implementation + +def _run_perf_script(python, runscript, runid, *, + extra_opts=None, + pyperf_opts=None, + verbose=False, + ): + if not runscript: + raise ValueError('missing runscript') + if not isinstance(runscript, str): + raise TypeError(f'runscript must be a string, got {runscript!r}') + + with _utils.temporary_file() as tmp: + opts = [ + *(extra_opts or ()), + *(pyperf_opts or ()), + '--output', tmp, + ] + if pyperf_opts and '--copy-env' in pyperf_opts: + argv, env = _prep_cmd(python, runscript, opts, runid, NOOP) + else: + opts, inherit_envvar = _resolve_restricted_opts(opts) + argv, env = _prep_cmd(python, runscript, opts, runid, inherit_envvar) + _utils.run_command(argv, env=env, hide_stderr=not verbose) + + return pyperf.BenchmarkSuite.load(tmp) + + +def _prep_cmd(python, script, opts, runid, on_set_envvar=None): + # Populate the environment variables. + env = dict(os.environ) + def set_envvar(name, value): + env[name] = value + if on_set_envvar is not None: + on_set_envvar(name) + # on_set_envvar() may update "opts" so all calls to set_envvar() + # must happen before building argv. + set_envvar('PYPERFORMANCE_RUNID', str(runid)) + + # Build argv. + argv = [ + python, '-u', script, + *(opts or ()), + ] + + return argv, env + + +def _resolve_restricted_opts(opts): + # Deal with --inherit-environ. + FLAG = '--inherit-environ' + resolved = [] + idx = None + for i, opt in enumerate(opts): + if opt.startswith(FLAG + '='): + idx = i + 1 + resolved.append(FLAG) + resolved.append(opt.partition('=')[-2]) + resolved.extend(opts[idx:]) + break + elif opt == FLAG: + idx = i + 1 + resolved.append(FLAG) + resolved.append(opts[idx]) + resolved.extend(opts[idx + 1:]) + break + else: + resolved.append(opt) + else: + resolved.extend(['--inherit-environ', '']) + idx = len(resolved) - 1 + inherited = set(resolved[idx].replace(',', ' ').split()) + def inherit_env_var(name): + inherited.add(name) + resolved[idx] = ','.join(inherited) + + return resolved, inherit_env_var + + +def _insert_on_PYTHONPATH(entry, env): + PYTHONPATH = env.get('PYTHONPATH', '').split(os.pathsep) + PYTHONPATH.insert(0, entry) + env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) diff --git a/pyperformance/_benchmark_metadata.py b/pyperformance/_benchmark_metadata.py new file mode 100644 index 00000000..94273f67 --- /dev/null +++ b/pyperformance/_benchmark_metadata.py @@ -0,0 +1,250 @@ + +__all__ = [ + 'load_metadata', +] + + +import os.path + +from . import _utils, _pyproject_toml +from . 
import _benchmark + + +METADATA = 'pyproject.toml' +DEPENDENCIES = 'requirements.in' +REQUIREMENTS = 'requirements.txt' +DATA = 'data' +RUN = 'run_benchmark.py' + +PEP_621_FIELDS = { + 'name': None, + 'version': None, + 'requires-python': 'python', + 'dependencies': None, + #'optional-dependencies': '', + #'urls': '', +} +TOOL_FIELDS = { + #'inherits': None, + 'metafile': None, + 'name': None, + 'tags': None, + 'datadir': None, + 'runscript': None, + 'extra_opts': None, +} + + +#class BenchmarkMetadata: +# spec +# base +# metafile +# tags +# python +# dependencies # (from requirements.in) +# requirements # (from lock file or requirements.txt) +# datadir +# runscript +# extra_opts + + +def load_metadata(metafile, defaults=None): + if isinstance(metafile, str): + name, rootdir = _name_from_filename(metafile) + data, filename = _pyproject_toml.load_pyproject_toml( + metafile, + name=name or None, + requirefiles=False, + ) + else: + text = metafile.read() + filename = metafile.name + name, rootdir = _name_from_filename(filename) + data = _pyproject_toml.parse_pyproject_toml( + text, rootdir, name, + requirefiles=False, + ) + project = data.get('project') + tool = data.get('tool', {}).get('pyperformance', {}) + + defaults = _ensure_defaults(defaults, rootdir) + base, basefile = _resolve_base( + tool.get('inherits'), # XXX Pop it? + project, + filename, + defaults, + ) + top = _resolve(project or {}, tool, filename) + merged = _merge_metadata(top, base, defaults) + + if not merged.get('name'): + raise ValueError('missing benchmark name') + if not merged.get('version'): + print('====================') + from pprint import pprint + print('top:') + pprint(top) + print('base:') + pprint(base) + print('defaults:') + pprint(defaults) + print('merged:') + pprint(merged) + print('====================') + raise ValueError('missing benchmark version') + + metafile = merged.pop('metafile') + merged['spec'] = _benchmark.BenchmarkSpec( + merged.pop('name'), + merged.pop('version'), + # XXX Should we leave this (origin) blank? 
+ metafile, + ) + if basefile: + merged['base'] = basefile + + return merged, filename + + +####################################### +# internal implementation + +def _name_from_filename(metafile): + rootdir, basename = os.path.split(metafile) + if basename == 'pyproject.toml': + dirname = os.path.dirname(rootdir) + name = dirname[3:] if dirname.startswith('bm_') else None + elif basename.startswith('bm_') and basename.endswith('.toml'): + name = basename[3:-5] + else: + name = None + return name, rootdir + + +def _ensure_defaults(defaults, rootdir): + if not defaults: + defaults = {} + + if not defaults.get('datadir'): + datadir = os.path.join(rootdir, DATA) + if os.path.isdir(datadir): + defaults['datadir'] = datadir + + if not defaults.get('runscript'): + runscript = os.path.join(rootdir, RUN) + if os.path.isfile(runscript): + defaults['runscript'] = runscript + + return defaults + + +def _resolve_base(metabase, project, filename, defaults, *, + minimalwithbase=False): + rootdir, basename = os.path.split(filename) + + if not metabase: + if basename == 'pyproject.toml': + return None, None + elif not (basename.startswith('bm_') and basename.endswith('.toml')): + return None, None + elif not os.path.basename(rootdir).startswith('bm_'): + return None, None + else: + metabase = os.path.join(rootdir, 'pyproject.toml') + if not os.path.isfile(metabase): + return None, None + + if project is not None and minimalwithbase: + unexpected = set(project) - {'name', 'dynamic', 'dependencies'} + if unexpected: + raise ValueError(f'[project] should be minimal if "inherits" is provided, got extra {sorted(unexpected)}') + + if metabase == '..': + metabase = os.path.join( + os.path.dirname(rootdir), + 'base.toml', + ) + if metabase == filename: + raise Exception('circular') + + if not os.path.isabs(metabase): + metabase = os.path.join(rootdir, metabase) + if metabase == filename: + raise Exception('circular') + + defaults = dict(defaults, name='_base_') + return load_metadata(metabase, defaults) + + +def _resolve(project, tool, filename): + resolved = { + 'metafile': filename, + } + + rootdir = os.path.dirname(filename) + for field, target in TOOL_FIELDS.items(): + if target is None: + target = field + if not resolved.get(target): + value = tool.get(field) + if value is not None: + resolved[target] = _resolve_value(field, value, rootdir) + + for field, target in PEP_621_FIELDS.items(): + if target is None: + target = field + if field == 'url': + repo = project.get('urls', {}).get('repository') + raise NotImplementedError + elif not resolved.get(target): + value = project.get(field) + if value is not None: + resolved[target] = value + + return resolved + + +def _resolve_value(field, value, rootdir): + if field == 'name': + _utils.check_name(value, allownumeric=True) + elif field == 'metafile': + assert False, 'unreachable' + elif field == 'tags': + if isinstance(value, str): + value = value.replace(',', ' ').split() + for tag in value: + _utils.check_name(tag) + elif field == 'datadir': + if not os.path.isabs(value): + value = os.path.join(rootdir, value) + _utils.check_dir(value) + elif field == 'runscript': + if not os.path.isabs(value): + value = os.path.join(rootdir, value) + _utils.check_file(value) + elif field == 'extra_opts': + if isinstance(value, str): + raise TypeError(f'extra_opts should be a list of strings, got {value!r}') + for opt in value: + if not opt or not isinstance(opt, str): + raise TypeError(f'extra_opts should be a list of strings, got {value!r}') + else: + raise 
NotImplementedError(field) + return value + + +def _merge_metadata(*tiers): + merged = {} + for data in tiers: + if not data: + continue + for field, value in data.items(): + if field == 'spec': + field = 'version' + value = value.version + if merged.get(field): + # XXX Merge containers? + continue + if value or isinstance(value, int): + merged[field] = value + return merged diff --git a/pyperformance/_benchmark_selections.py b/pyperformance/_benchmark_selections.py new file mode 100644 index 00000000..194f856d --- /dev/null +++ b/pyperformance/_benchmark_selections.py @@ -0,0 +1,118 @@ + +__all__ = [ + 'parse_selection', + 'iter_selections', +] + + +from . import _utils, _manifest, _benchmark + + +def parse_selection(selection, *, op=None): + # "selection" is one of the following: + # * a benchmark string + # * a benchmark name + # * a benchmark pattern + # * a tag + # * a tag pattern + parsed = _benchmark.parse_benchmark(selection, fail=False) + spec, metafile = parsed if parsed else (None, None) + if parsed and spec.version: + kind = 'benchmark' + spec, metafile = parsed + if metafile: + parsed = _benchmark.Benchmark(spec, metafile) + else: + parsed = spec + elif parsed and (spec.origin or metafile): + raise NotImplementedError(selection) + else: + parsed = _utils.parse_tag_pattern(selection) + if parsed: + kind = 'tag' + else: + kind = 'name' + parsed = _utils.parse_name_pattern(selection, fail=True) +# parsed = _utils.parse_name_pattern(selection, fail=False) + if not parsed: + raise ValueError(f'unsupported selection {selection!r}') + return op or '+', selection, kind, parsed + + +def iter_selections(manifest, selections, *, unique=True): + byname = {b.name: b for b in manifest.benchmarks} + + # Compose the expanded include/exclude lists. + seen = set() + included = [] + excluded = set() + for op, _, kind, parsed in selections: + matches = _match_selection(manifest, kind, parsed, byname) + if op == '+': + for bench in matches: + if bench not in seen or not unique: + included.append(bench) + seen.add(bench) + elif op == '-': + for bench in matches: + excluded.add(bench) + else: + raise NotImplementedError(op) + if not included: + included = list(_match_selection(manifest, 'tag', 'default', byname)) + + for bench in included: + if bench not in excluded: + yield bench + + +####################################### +# internal implementation + +def _match_selection(manifest, kind, parsed, byname): + if kind == 'benchmark': + bench = parsed + # XXX Match bench.metafile too? + spec = getattr(bench, 'spec', bench) + # For now we only support selection by name. + # XXX Support selection by version? + # XXX Support selection by origin? + if spec.version or spec.origin: + raise NotImplementedError(spec) + if spec.name in byname: + yield bench + else: + # No match! The caller can handle this as they like. + yield str(bench) + elif kind == 'tag': + groups = [] + if callable(parsed): + match_tag = parsed + for group in manifest.groups: + if match_tag(group): + groups.append(group) + elif parsed in manifest.groups: + groups.append(parsed) + else: + raise ValueError(f'unsupported selection {parsed!r}') + for group in groups: + yield from manifest.resolve_group(group) + elif kind == 'name': + if callable(parsed): + match_bench = parsed + for bench in manifest.benchmarks: + if match_bench(bench.name): + yield bench + else: + name = parsed + if name in byname: + yield byname[name] + # We also check the groups, for backward compatibility. 
+ elif name in manifest.groups: + yield from _match_selection(manifest, 'tag', name, byname) + else: + _utils.check_name(name) + # No match! The caller can handle this as they like. + yield name + else: + raise NotImplementedError(kind) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py new file mode 100644 index 00000000..831472f4 --- /dev/null +++ b/pyperformance/_manifest.py @@ -0,0 +1,434 @@ + +__all__ = [ + 'BenchmarksManifest', + 'load_manifest', + 'parse_manifest', +] + + +from collections import namedtuple +import os.path + + +from . import __version__, DATA_DIR +from . import _benchmark, _utils + + +DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') +DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST') + +BENCH_COLUMNS = ('name', 'metafile') +BENCH_HEADER = '\t'.join(BENCH_COLUMNS) + + +def load_manifest(filename, *, resolve=None): + if not filename: + filename = DEFAULT_MANIFEST + sections = _parse_manifest_file(filename) + return BenchmarksManifest._from_sections(sections, resolve, filename) + + +def parse_manifest(lines, *, resolve=None, filename=None): + if isinstance(lines, str): + lines = lines.splitlines() + else: + if not filename: + # Try getting the filename from a file. + filename = getattr(lines, 'name', None) + sections = _parse_manifest(lines, filename) + return BenchmarksManifest._from_sections(sections, resolve, filename) + + +def resolve_default_benchmark(bench): + if isinstance(bench, _benchmark.Benchmark): + spec = bench.spec + else: + spec = bench + bench = _benchmark.Benchmark(spec, '') + bench.metafile = None + + if not spec.version: + spec = spec._replace(version=__version__) + if not spec.origin: + spec = spec._replace(origin='') + bench.spec = spec + + if not bench.metafile: + metafile = os.path.join(DEFAULTS_DIR, + f'bm_{bench.name}', + 'pyproject.toml') + bench.metafile = metafile + return bench + + +class BenchmarksManifest: + + @classmethod + def _from_sections(cls, sections, resolve=None, filename=None): + self = cls(filename=filename) + self._add_sections(sections, resolve) + return self + + def __init__(self, benchmarks=None, groups=None, filename=None): + self._raw_benchmarks = [] + # XXX Support disabling all groups (except all and default)? 
+ self._raw_groups = {} + self._raw_filename = filename + self._byname = {} + self._groups = None + self._tags = None + + if benchmarks: + self._add_benchmarks(benchmarks) + if groups: + self._add_groups(groups) + + def __repr__(self): + args = (f'{n}={getattr(self, "_raw_" + n)}' + for n in ('benchmarks', 'groups', 'filename')) + return f'{type(self).__name__}({", ".join(args)})' + + @property + def benchmarks(self): + return list(self._byname.values()) + + @property + def groups(self): + names = self._custom_groups() + if not names: + names = set(self._get_tags()) + return names | {'all', 'default'} + + @property + def filename(self): + return self._raw_filename + + def _add_sections(self, sections, resolve): + filename = self._raw_filename + _resolve = resolve + if resolve is None and filename == DEFAULT_MANIFEST: + _resolve = default_resolve = resolve_default_benchmark + sections_seen = {filename: set()} + lastfile = None + for filename, section, data in sections: + if filename != lastfile: + _resolve = resolve + if _resolve is None and filename == DEFAULT_MANIFEST: + _resolve = resolve_default_benchmark + lastfile = filename + + if filename not in sections_seen: + sections_seen[filename] = {section} + elif section in sections_seen[filename]: + # For now each section can only show up once. + raise NotImplementedError((section, data)) + else: + sections_seen[filename].add(section) + + if section == 'includes': + pass + elif section == 'benchmarks': + entries = ((s, m, filename) for s, m in data) + self._add_benchmarks(entries, _resolve) + elif section == 'groups': + for name in data: + self._add_group(name, None) + elif section == 'group': + name, entries = data + self._add_group(name, entries) + else: + raise NotImplementedError((section, data)) + + def _add_benchmarks(self, entries, resolve): + for spec, metafile, filename in entries: + # XXX Ignore duplicates? + self._add_benchmark(spec, metafile, resolve, filename) + + def _add_benchmark(self, spec, metafile, resolve, filename): + if spec.name in self._raw_groups: + raise ValueError(f'a group and a benchmark have the same name ({spec.name})') + if metafile: + if filename: + localdir = os.path.dirname(filename) + metafile = os.path.join(localdir, metafile) + bench = _benchmark.Benchmark(spec, metafile) + else: + metafile = None + bench = spec + self._raw_benchmarks.append((spec, metafile, filename)) + if resolve is not None: + bench = resolve(bench) + self._byname[bench.name] = bench + self._groups = None # Force re-resolution. + self._tags = None # Force re-resolution. + + def _add_group(self, name, entries): + if name in self._byname: + raise ValueError(f'a group and a benchmark have the same name ({name})') + if name == 'all': + # XXX Emit a warning? + return + if entries: + raw = self._raw_groups.get(name) + if raw is None: + raw = self._raw_groups[name] = list(entries) if entries else None + elif entries is not None: + raw.extend(entries) + elif name in self._raw_groups: + return + else: + self._raw_groups[name] = None + self._groups = None # Force re-resolution. + + def _custom_groups(self): + return set(self._raw_groups) - {'all', 'default'} + + def _get_tags(self): + if self._tags is None: + self._tags = _get_tags(self._byname.values()) + self._tags.pop('all', None) # It is manifest-specific. + self._tags.pop('default', None) # It is manifest-specific. 
+ return self._tags + + def _resolve_groups(self): + if self._groups is not None: + return self._groups + + raw = {} + for name, entries in self._raw_groups.items(): + if entries and entries[0][0] == '-': + entries = list(entries) + entries.insert(0, ('+', '')) + raw[name] = entries + self._groups = _resolve_groups(raw, self._byname) + return self._groups + + def resolve_group(self, name, *, fail=True): + if name == 'all': + benchmarks = self._byname.values() + elif name == 'default': + if 'default' not in self._raw_groups: + benchmarks = self._byname.values() + else: + groups = self._resolve_groups() + benchmarks = groups.get(name) + elif not self._custom_groups(): + benchmarks = self._get_tags().get(name) + if benchmarks is None and fail: + raise KeyError(name) + else: + groups = self._resolve_groups() + benchmarks = groups.get(name) + if not benchmarks: + if name in (set(self._raw_groups) - {'default'}): + benchmarks = self._get_tags().get(name, ()) + elif fail: + raise KeyError(name) + yield from benchmarks or () + + def show(self, *, raw=True, resolved=True): + yield self.filename + yield 'groups:' + if raw: + yield f' {self._raw_groups}' + if resolved: + yield f' {self.groups}' + yield 'default:' + if resolved: + for i, bench in enumerate(self.resolve_group('default')): + yield f' {i:>2} {bench}' + if raw: + yield 'benchmarks (raw):' + for i, bench in enumerate(self._raw_benchmarks): + yield f' {i:>2} {bench}' + if resolved: + yield 'benchmarks:' + for i, bench in enumerate(self.benchmarks): + yield f' {i:>2} {bench}' + + +####################################### +# internal implementation + +def _iter_sections(lines): + lines = (line.split('#')[0].strip() + for line in lines) + + name = None + section = None + for line in lines: + if not line: + continue + if line.startswith('[') and line.endswith(']'): + if name: + yield name, section + name = line[1:-1].strip() + section = [] + else: + if not name: + raise ValueError(f'expected new section, got {line!r}') + section.append(line) + if name: + yield name, section + else: + raise ValueError('invalid manifest file, no sections found') + + +def _parse_manifest_file(filename): + relroot = os.path.dirname(filename) + filename = _utils.resolve_file(filename, relroot) + with open(filename) as infile: + yield from _parse_manifest(infile, filename) + + +def _parse_manifest(lines, filename): + relroot = os.path.dirname(filename) + for section, seclines in _iter_sections(lines): + if section == 'includes': + yield filename, section, list(seclines) + for line in seclines: + if line == '': + line = DEFAULT_MANIFEST + else: + line = _utils.resolve_file(line, relroot) + yield from _parse_manifest_file(line) + elif section == 'benchmarks': + yield filename, section, list(_parse_benchmarks_section(seclines)) + elif section == 'groups': + yield filename, section, list(_parse_groups_section(seclines)) + elif section.startswith('group '): + section, _, group = section.partition(' ') + entries = list(_parse_group_section(seclines)) + yield filename, section, (group, entries) + else: + raise ValueError(f'unsupported section {section!r}') + + +def _parse_benchmarks_section(lines): + if not lines: + lines = [''] + lines = iter(lines) + if next(lines) != BENCH_HEADER: + raise ValueError('invalid manifest file, expected benchmarks table header') + + version = origin = None + for line in lines: + try: + name, metafile = (None if l == '-' else l + for l in line.split('\t')) + except ValueError: + raise ValueError(f'bad benchmark line {line!r}') + spec = 
_benchmark.BenchmarkSpec(name or None, version, origin) + metafile = _parse_metafile(metafile, name) + yield spec, metafile + + +def _parse_metafile(metafile, name): + if not metafile: + return None + elif metafile.startswith('<') and metafile.endswith('>'): + directive, _, extra = metafile[1:-1].partition(':') + if directive == 'local': + if extra: + rootdir = f'bm_{extra}' + basename = f'bm_{name}.toml' + else: + rootdir = f'bm_{name}' + basename = 'pyproject.toml' + # A relative path will be resolved against the manifset file. + return os.path.join(rootdir, basename) + else: + raise ValueError(f'unsupported metafile directive {metafile!r}') + else: + return os.path.abspath(metafile) + + +def _parse_groups_section(lines): + for name in seclines: + _utils.check_name(name) + yield name + + +def _parse_group_section(lines): + yielded = False + for line in lines: + if line.startswith('-'): + # Exclude a benchmark or group. + op = '-' + name = line[1:] + elif line.startswith('+'): + op = '+' + name = line[1:] + else: + name = line + _benchmark.check_name(name) + yield op, name + yielded = True + + +def _get_tags(benchmarks): + # Fill in groups from benchmark tags. + tags = {} + for bench in benchmarks: + for tag in getattr(bench, 'tags', ()): + if tag in tags: + tags[tag].append(bench) + else: + tags[tag] = [bench] + return tags + + +def _resolve_groups(rawgroups, byname): + benchmarks = set(byname.values()) + tags = None + groups = { + 'all': list(benchmarks), + } + unresolved = {} + for groupname, entries in rawgroups.items(): + if groupname == 'all': + continue + if not entries: + if groupname == 'default': + groups[groupname] = list(benchmarks) + else: + if tags is None: + tags = _get_tags(benchmarks) + groups[groupname] = tags.get(groupname, ()) + continue + assert entries[0][0] == '+', (groupname, entries) + unresolved[groupname] = names = set() + for op, name in entries: + if op == '+': + if name == '': + names.update(byname) + elif name in byname or name in rawgroups: + names.add(name) + elif op == '-': + if name == '': + raise NotImplementedError((groupname, op, name)) + elif name in byname or name in rawgroups: + if name in names: + names.remove(name) + else: + raise NotImplementedError((groupname, op, name)) + while unresolved: + for groupname, names in list(unresolved.items()): + benchmarks = set() + for name in names: + if name in byname: + benchmarks.add(byname[name]) + elif name in groups: + benchmarks.update(groups[name]) + names.remove(name) + elif name == groupname: + names.remove(name) + break + else: # name in unresolved + names.remove(name) + names.extend(unresolved[name]) + break + else: + groups[groupname] = benchmarks + del unresolved[groupname] + return groups diff --git a/pyperformance/_pyproject_toml.py b/pyperformance/_pyproject_toml.py new file mode 100644 index 00000000..bfde1530 --- /dev/null +++ b/pyperformance/_pyproject_toml.py @@ -0,0 +1,329 @@ +# This module should be replaced with the equivalent functionality +# in the PyPI "packaging" package (once it's added there). 
+ +__all__ = [ + 'parse_person', + 'parse_classifier', + 'parse_entry_point', + 'parse_pyproject_toml', + 'load_pyproject_toml', +] + + +import os.path +import re +import urllib.parse + +import packaging.requirements +import packaging.specifiers +import packaging.utils +import packaging.version +import toml + +from ._utils import check_name + + +NAME_RE = re.compile('^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$', re.IGNORECASE) + + +def parse_person(text): + # XXX + return text + + +def parse_classifier(text): + # XXX Use https://pypi.org/project/packaging-classifiers. + return text + + +def parse_entry_point(text): + # See: + # * https://packaging.python.org/specifications/entry-points/#data-model + # * https://www.python.org/dev/peps/pep-0517/#source-trees + module, sep, qualname = text.parition(':') + if all(p.isidentifier() for p in module.split('.')): + if not sep or all(p.isidentifier() for p in qualname.split('.')): + return module, qualname + + raise ValueError(f'invalid entry point {text!r}') + + +def parse_pyproject_toml(text, rootdir, name=None, *, + tools=None, + requirefiles=True, + ): + data = toml.loads(text) + unused = list(data) + + for section, normalize in SECTIONS.items(): + try: + secdata = data[section] + except KeyError: + data[section] = None + else: + data[section] = normalize(secdata, + name=name, + tools=tools, + rootdir=rootdir, + requirefiles=requirefiles, + ) + unused.remove(section) + + if unused: + raise ValueError(f'unsupported sections ({", ".join(sorted(unused))})') + + return data + + +def load_pyproject_toml(filename, *, name=None, tools=None, requirefiles=True): + if os.path.isdir(filename): + rootdir = filename + filename = os.path.join(rootdir, 'pyproject.toml') + else: + rootdir = os.path.dirname(filename) + + with open(filename) as infile: + text = infile.read() + data = parse_pyproject_toml(text, rootdir, name, + tools=tools, + requirefiles=requirefiles, + ) + return data, filename + + +####################################### +# internal implementation + +def _check_relfile(relname, rootdir, kind): + if os.path.isabs(relname): + raise ValuError(f'{relname!r} is absolute, expected relative') + actual = os.path.join(rootdir, relname) + if kind == 'dir': + if not os.path.isdir(actual): + raise ValueError(f'directory {actual!r} does not exist') + elif kind == 'file': + if not os.path.isfile(actual): + raise ValueError(f'file {actual!r} does not exist') + elif kind == 'any': + if not os.path.exists(actual): + raise ValueError(f'{actual!r} does not exist') + elif kind: + raise NotImplementedError(kind) + + +def _check_file_or_text(table, rootdir, requirefiles, extra=None): + unsupported = set(table) - set(['file', 'text']) - set(extra or ()) + if unsupported: + raise ValueError(f'unsupported license data {table!r}') + + if 'file' in table: + if 'text' in table: + raise ValueError(f'"file" and "text" are mutually exclusive') + kind = 'file' if requirefiles else None + _check_relfile(table['file'], rootdir, kind) + else: + text = table['text'] + # XXX Validate it? + + +def _normalize_project(data, rootdir, name, requirefiles, **_ignored): + # See PEP 621. + unused = set(data) + + ########## + # First handle the required fields. 
+ + name = data.get('name', name) + if name: + if not NAME_RE.match(name): + raise ValueError(f'invalid name {name!r}') + name = packaging.utils.canonicalize_name(name) + data['name'] = name + if 'name' in unused: + unused.remove('name') + else: + if 'name' not in data.get('dynamic', []): + raise ValueError('missing required "name" field') + + try: + version = data['version'] + except KeyError: + if 'version' not in data.get('dynamic', []): + raise ValueError('missing required "version" field') + else: + # We keep the full version string rather than + # the canonicalized form. However, we still validate and + # (effectively) normalize it. + version = packaging.version.parse(version) + data['version'] = str(version) + unused.remove('version') + + ########## + # Now we handle the optional fields. + + # We leave "description" as-is. + + key = 'readme' + if key in data: + readme = data[key] + if isinstance(readme, 'str'): + readme = data[key] = {'file': readme} + # XXX Check the suffix. + # XXX Handle 'content-type'. + # XXX Handle "charset" parameter. + _check_file_or_text(data[key], rootdir, requirefiles, + ['content-type', 'charset']) + unused.remove(key) + + key = 'requires-python' + if key in data: + # We keep it as a string. + data[key] = str(packaging.specifiers.SpecifierSet(data[key])) + unused.remove(key) + + key = 'license' + if key in data: + _check_file_or_text(data[key], rootdir, requirefiles) + unused.remove(key) + + key = 'keywords' + if key in data: + for keyword in data[key]: + # XXX Is this the right check? + check_name(name, loose=True) + unused.remove(key) + + key = 'authors' + if key in data: + for person in data[key]: + # We only make sure it is valid. + parse_person(person) + unused.remove(key) + + key = 'maintainers' + if key in data: + for person in data[key]: + # We only make sure it is valid. + parse_person(person) + unused.remove(key) + + key = 'classifiers' + if key in data: + for classifier in data[key]: + # We only make sure it is valid. + parse_classifier(classifier) + unused.remove(key) + + key = 'dependencies' + if key in data: + for dep in data[key]: + # We only make sure it is valid. + packaging.requirements.Requirement(dep) + unused.remove(key) + + key = 'optional-dependencies' + if key in data: + # XXX + unused.remove(key) + + key = 'urls' + if key in data: + for name, url in data[key].items(): + # XXX Is there a stricter check? + check_name(name, loose=True) + # We only make sure it is valid. + urllib.parse.urlparse(url) + unused.remove(key) + + key = 'scripts' + if key in data: + for name, value in data[key].items(): + # XXX Is there a stricter check? + check_name(name, loose=True) + # We only make sure it is valid. + parse_entry_point(value) + unused.remove(key) + + key = 'gui-scripts' + if key in data: + for _, value in data[key].items(): + # XXX Is there a stricter check? + check_name(name, loose=True) + # We only make sure it is valid. + parse_entry_point(value) + unused.remove(key) + + key = 'entry-points' + if key in data: + for groupname, group in data[key].items(): + # XXX Is there a stricter check? + check_name(groupname, loose=True) + for epname, value in group.items(): + # XXX Is there a stricter check? + check_name(epname, loose=True) + # We only make sure it is valid. + parse_entry_point(value) + unused.remove(key) + + key = 'dynamic' + if key in data: + for field in data[key]: + check_name(field, loose=True) + # XXX Fail it isn't one of the supported fields. 
+ unused.remove(key) + + return data + + +def _normalize_build_system(data, rootdir, requirefiles, **_ignored): + # See PEP 518 and 517. + unused = set(data) + + key = 'requires' + if key in data: + reqs = data[key] + for i, raw in enumerate(reqs): + # We only make sure it is valid. + packaging.requirements.Requirement(raw) + unused.remove(key) + else: + raise ValueError('missing "requires" field') + + key = 'build-backend' + if key in data: + # We only make sure it is valid. + parse_entry_point(data[key]) + unused.remove(key) + + key = 'backend-path' + if key in data: + if 'build-backend' not in data: + raise ValueError('missing "build-backend" field') + kind = 'dir' if requirefiles else None + for dirname in data[key]: + _check_relfile(dirname, rootdir, kind=kind) + unused.remove(key) + + if unused: + raise ValueError(f'unsupported keys ({", ".join(sorted(unused))})') + + return data + + +def _normalize_tool(data, tools, rootdir, **_ignored): + # See PEP 518. + tools = tools or {} + for name, tooldata in list(data.items()): + if name in tools: + normalize = tools[name] + data[name] = normalize(name, tooldata, rootdir=rootdir) + if data[name] is None: + del data[name] + return data + + +SECTIONS = { + 'project': _normalize_project, + 'build-system': _normalize_build_system, + 'tool': _normalize_tool, +} diff --git a/pyperformance/_pythoninfo.py b/pyperformance/_pythoninfo.py new file mode 100644 index 00000000..69205b98 --- /dev/null +++ b/pyperformance/_pythoninfo.py @@ -0,0 +1,166 @@ +# A utility library for getting information about a Python executable. +# +# This may be used as a script. + +__all__ = [ + 'get_python_id', + 'get_python_info', + 'inspect_python_install', +] + + +import hashlib +import json +import os +import subprocess +import sys + + +def get_python_id(python=sys.executable, *, prefix=None): + """Return a unique (str) identifier for the given Python executable.""" + if not python or isinstance(python, str): + info = get_python_info(python or sys.executable) + else: + info = python + python = info['executable'] + + data = [ + # "executable" represents the install location + # (and build, to an extent). + info['executable'], + # sys.version encodes version, git info, build_date, and build_tool. + info['version_str'], + info['implementation_name'], + '.'.join(str(v) for v in info['implementation_version']), + str(info['api_version']), + info['magic_number'], + ] + # XXX Add git info if a dev build. + + h = hashlib.sha256() + for value in data: + h.update(value.encode('utf-8')) + # XXX Also include the sorted output of "python -m pip freeze"? + py_id = h.hexdigest() + # XXX Return the whole string? 
+ py_id = py_id[:12] + + if prefix: + if prefix is True: + major, minor = info['version_info'][:2] + py_id = f'{info["implementation_name"]}{major}.{minor}-{py_id}' + else: + py_id = prefix + py_id + + return py_id + + +def get_python_info(python=sys.executable): + if not python or python == sys.executable: + return _get_raw_info() + + try: + text = subprocess.check_output( + [python, __file__], + universal_newlines=True, + ) + except subprocess.CalledProcessError: + raise Exception(f'could not get info for {python}') + return json.loads(text) + + +def inspect_python_install(python=sys.executable): + if isinstance(python, str): + info = get_python_info(python) + else: + info = python + return _inspect_python_install(**info) + + +####################################### +# internal implementation + +try: + PLATLIBDIR = sys.platlibdir +except AttributeError: + PLATLIBDIR = 'lib' +STDLIB_DIR = os.path.dirname(os.__file__) +try: + from importlib.util import MAGIC_NUMBER +except ImportError: + import _imp + MAGIC_NUMBER = _imp.get_magic() + + +def _inspect_python_install(executable, prefix, base_prefix, + platlibdir, stdlib_dir, + version_info, platform, implementation_name, + **_ignored): + is_venv = prefix != base_prefix + + if os.path.basename(stdlib_dir) == 'Lib': + base_executable = os.path.join(os.path.dirname(stdlib_dir), 'python') + if not os.path.exists(base_executable): + raise NotImplementedError(base_executable) + is_dev = True + else: + major, minor = version_info[:2] + python = f'python{major}.{minor}' + if is_venv: + if '.' in os.path.basename(executable): + ext = executable.rpartition('.')[2] + python_exe = f'{python}.{ext}' + else: + python_exe = python + expected = os.path.join(base_prefix, platlibdir, python) + if stdlib_dir == expected: + bindir = os.path.basename(os.path.dirname(executable)) + base_executable = os.path.join(base_prefix, bindir, python_exe) + else: + # XXX This is good enough for now. + base_executable = executable + #raise NotImplementedError(stdlib_dir) + elif implementation_name == 'cpython': + if platform == 'win32': + expected = os.path.join(prefix, platlibdir) + else: + expected = os.path.join(prefix, platlibdir, python) + if stdlib_dir == expected: + base_executable = executable + else: + raise NotImplementedError(stdlib_dir) + else: + base_executable = executable + is_dev = False + + return base_executable, is_dev, is_venv + + +def _get_raw_info(): + return { + 'executable': sys.executable, + 'version_str': sys.version, + 'version_info': tuple(sys.version_info), + 'hexversion': sys.hexversion, + 'api_version': sys.api_version, + 'magic_number': MAGIC_NUMBER.hex(), + 'implementation_name': sys.implementation.name.lower(), + 'implementation_version': tuple(sys.implementation.version), + 'platform': sys.platform, + 'prefix': sys.prefix, + 'exec_prefix': sys.exec_prefix, + 'base_prefix': sys.base_prefix, + 'base_exec_prefix': sys.base_exec_prefix, + 'platlibdir': PLATLIBDIR, + 'stdlib_dir': STDLIB_DIR, + # XXX Also include the build options (e.g. configure flags)? 
+ } + + +####################################### +# use as a script + +if __name__ == '__main__': + info = _get_raw_info() + json.dump(info, sys.stdout, indent=4) + print() diff --git a/pyperformance/_utils.py b/pyperformance/_utils.py new file mode 100644 index 00000000..53d49591 --- /dev/null +++ b/pyperformance/_utils.py @@ -0,0 +1,184 @@ + +__all__ = [ + # filesystem + 'temporary_file', + 'check_file', + 'check_dir', + # platform + 'MS_WINDOWS', + 'run_command', + # misc + 'check_name', + 'parse_name_pattern', + 'parse_tag_pattern', + 'parse_selections', + 'iter_clean_lines', +] + + +####################################### +# filesystem utils + +import contextlib +import errno +import os +import os.path +import tempfile + + +@contextlib.contextmanager +def temporary_file(): + tmp_filename = tempfile.mktemp() + try: + yield tmp_filename + finally: + try: + os.unlink(tmp_filename) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + + +def check_file(filename): + if not os.path.isabs(filename): + raise ValueError(f'expected absolute path, got {filename!r}') + if not os.path.isfile(filename): + raise ValueError(f'file missing ({filename})') + + +def check_dir(dirname): + if not os.path.isabs(dirname): + raise ValueError(f'expected absolute path, got {dirname!r}') + if not os.path.isdir(dirname): + raise ValueError(f'directory missing ({dirname})') + + +def resolve_file(filename, relroot=None): + resolved = os.path.normpath(filename) + resolved = os.path.expanduser(resolved) + #resolved = os.path.expandvars(filename) + if not os.path.isabs(resolved): + if not relroot: + relroot = os.getcwd() + elif not os.path.isabs(relroot): + raise NotImplementedError(relroot) + resolved = os.path.join(relroot, resolved) + return resolved + + +####################################### +# platform utils + +import logging +import subprocess +import sys + + +MS_WINDOWS = (sys.platform == 'win32') + + +def run_command(command, env=None, *, hide_stderr=True): + if hide_stderr: + kw = {'stderr': subprocess.PIPE} + else: + kw = {} + + logging.info("Running `%s`", + " ".join(list(map(str, command)))) + + # Explicitly flush standard streams, required if streams are buffered + # (not TTY) to write lines in the expected order + sys.stdout.flush() + sys.stderr.flush() + + proc = subprocess.Popen(command, + universal_newlines=True, + env=env, + **kw) + try: + stderr = proc.communicate()[1] + except: # noqa + if proc.stderr: + proc.stderr.close() + try: + proc.kill() + except OSError: + # process already exited + pass + proc.wait() + raise + + if proc.returncode != 0: + if hide_stderr: + sys.stderr.flush() + sys.stderr.write(stderr) + sys.stderr.flush() + raise RuntimeError("Benchmark died") + + +####################################### +# misc utils + +def check_name(name, *, loose=False, allownumeric=False): + if not name or not isinstance(name, str): + raise ValueError(f'bad name {name!r}') + if allownumeric: + name = f'_{name}' + if not loose: + if name.startswith('-'): + raise ValueError(name) + if not name.replace('-', '_').isidentifier(): + raise ValueError(name) + + +def parse_name_pattern(text, *, fail=True): + name = text + # XXX Support globs and/or regexes? (return a callable) + try: + check_name('_' + name) + except Exception: + if fail: + raise # re-raise + return None + return name + + +def parse_tag_pattern(text): + if not text.startswith('<'): + return None + if not text.endswith('>'): + return None + tag = text[1:-1] + # XXX Support globs and/or regexes? 
(return a callable) + check_name(tag) + return tag + + +def parse_selections(selections, parse_entry=None): + if isinstance(selections, str): + selections = selections.split(',') + if parse_entry is None: + parse_entry = (lambda o, e: (o, e, None, e)) + + for entry in selections: + entry = entry.strip() + if not entry: + continue + + op = '+' + if entry.startswith('-'): + op = '-' + entry = entry[1:] + + yield parse_entry(op, entry) + + +def iter_clean_lines(filename): + with open(filename) as reqsfile: + for line in reqsfile: + # strip comment + line = line.partition('#')[0] + line = line.rstrip() + if not line: + continue + yield line diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py deleted file mode 100644 index 9e2c56c1..00000000 --- a/pyperformance/benchmarks/__init__.py +++ /dev/null @@ -1,353 +0,0 @@ -import logging - -from pyperformance.run import run_perf_script - - -# Benchmark groups. The "default" group is what's run if no -b option is -# specified. -DEFAULT_GROUP = [ - '2to3', - 'chameleon', - 'chaos', - 'crypto_pyaes', - 'deltablue', - 'django_template', - 'dulwich_log', - 'fannkuch', - 'float', - - # FIXME: this benchmark fails with: - # TypeError: code() argument 15 must be bytes, not tuple - # 'genshi', - - 'go', - 'hexiom', - - # FIXME: this benchmark fails with: - # Unable to get the program 'hg' from the virtual environment - # 'hg_startup', - - 'html5lib', - 'json_dumps', - 'json_loads', - 'logging', - 'mako', - 'meteor_contest', - 'nbody', - 'nqueens', - 'pathlib', - 'pickle', - 'pickle_dict', - 'pickle_list', - 'pickle_pure_python', - 'pidigits', - 'pyflate', - 'python_startup', - 'python_startup_no_site', - 'raytrace', - 'regex_compile', - 'regex_dna', - 'regex_effbot', - 'regex_v8', - 'richards', - 'scimark', - 'spectral_norm', - 'sqlalchemy_declarative', - 'sqlalchemy_imperative', - 'sqlite_synth', - 'sympy', - 'telco', - 'tornado_http', - 'unpack_sequence', - 'unpickle', - 'unpickle_list', - 'unpickle_pure_python', - 'xml_etree', -] - -BENCH_GROUPS = { - # get_benchmarks() creates an "all" group which includes every benchmark - # pyperformance knows about. 
- "default": DEFAULT_GROUP, - "startup": ["normal_startup", "startup_nosite", - "hg_startup"], - "regex": ["regex_v8", "regex_effbot", "regex_compile", - "regex_dna"], - "serialize": ["pickle_pure_python", "unpickle_pure_python", # Not for Python 3 - "pickle", "unpickle", - "xml_etree", - "json_dumps", "json_loads"], - "apps": ["2to3", "chameleon", "html5lib", "tornado_http"], - "math": ["float", "nbody", "pidigits"], - "template": ["django_template", "mako"], -} - - -def BM_2to3(python, options): - return run_perf_script(python, options, "2to3") - - -# def BM_hg_startup(python, options): -# return run_perf_script(python, options, "hg_startup") - - -def BM_Chameleon(python, options): - return run_perf_script(python, options, "chameleon") - - -def BM_Tornado_Http(python, options): - return run_perf_script(python, options, "tornado_http") - - -def BM_Django_Template(python, options): - return run_perf_script(python, options, "django_template") - - -def BM_Float(python, options): - return run_perf_script(python, options, "float") - - -def BM_mako(python, options): - return run_perf_script(python, options, "mako") - - -def BM_pathlib(python, options): - return run_perf_script(python, options, "pathlib") - - -def pickle_benchmark(python, options, *extra_args): - return run_perf_script(python, options, "pickle", - extra_args=list(extra_args)) - - -def BM_pickle(python, options): - return pickle_benchmark(python, options, "pickle") - - -def BM_unpickle(python, options): - return pickle_benchmark(python, options, "unpickle") - - -def BM_pickle_list(python, options): - return pickle_benchmark(python, options, "pickle_list") - - -def BM_pickle_dict(python, options): - return pickle_benchmark(python, options, "pickle_dict") - - -def BM_unpickle_list(python, options): - return pickle_benchmark(python, options, "unpickle_list") - - -def BM_pickle_pure_python(python, options): - return pickle_benchmark(python, options, "--pure-python", "pickle") - - -def BM_unpickle_pure_python(python, options): - return pickle_benchmark(python, options, "--pure-python", "unpickle") - - -def BM_xml_etree(python, options): - return run_perf_script(python, options, "xml_etree") - - -def BM_json_loads(python, options): - return run_perf_script(python, options, "json_loads") - - -def BM_json_dumps(python, options): - return run_perf_script(python, options, "json_dumps") - - -def BM_NQueens(python, options): - return run_perf_script(python, options, "nqueens") - - -def BM_Chaos(python, options): - return run_perf_script(python, options, "chaos") - - -def BM_Fannkuch(python, options): - return run_perf_script(python, options, "fannkuch") - - -def BM_Go(python, options): - return run_perf_script(python, options, "go") - - -def BM_Meteor_Contest(python, options): - return run_perf_script(python, options, "meteor_contest") - - -def BM_Spectral_Norm(python, options): - return run_perf_script(python, options, "spectral_norm") - - -def BM_Telco(python, options): - return run_perf_script(python, options, "telco") - - -def BM_hexiom(python, options): - return run_perf_script(python, options, "hexiom") - - -def BM_raytrace(python, options): - return run_perf_script(python, options, "raytrace") - - -def BM_logging(python, options): - return run_perf_script(python, options, "logging") - - -def BM_python_startup(python, options): - return run_perf_script(python, options, "python_startup") - - -def BM_python_startup_no_site(python, options): - return run_perf_script(python, options, "python_startup", - extra_args=["--no-site"]) - - -def 
BM_regex_v8(python, options): - return run_perf_script(python, options, "regex_v8") - - -def BM_regex_effbot(python, options): - return run_perf_script(python, options, "regex_effbot") - - -def BM_regex_compile(python, options): - return run_perf_script(python, options, "regex_compile") - - -def BM_regex_dna(python, options): - return run_perf_script(python, options, "regex_dna") - - -def BM_unpack_sequence(python, options): - return run_perf_script(python, options, "unpack_sequence") - - -def BM_nbody(python, options): - return run_perf_script(python, options, "nbody") - - -def BM_html5lib(python, options): - return run_perf_script(python, options, "html5lib") - - -def BM_richards(python, options): - return run_perf_script(python, options, "richards") - - -def BM_pidigits(python, options): - return run_perf_script(python, options, "pidigits") - - -def BM_crypto_pyaes(python, options): - return run_perf_script(python, options, "crypto_pyaes") - - -def BM_sympy(python, options): - return run_perf_script(python, options, "sympy") - - -def BM_deltablue(python, options): - return run_perf_script(python, options, "deltablue") - - -def BM_scimark(python, options): - return run_perf_script(python, options, "scimark") - - -def BM_dulwich_log(python, options): - return run_perf_script(python, options, "dulwich_log") - - -def BM_pyflate(python, options): - return run_perf_script(python, options, "pyflate") - - -def BM_sqlite_synth(python, options): - return run_perf_script(python, options, "sqlite_synth") - - -# def BM_genshi(python, options): -# return run_perf_script(python, options, "genshi") - - -def BM_sqlalchemy_declarative(python, options): - return run_perf_script(python, options, "sqlalchemy_declarative") - - -def BM_sqlalchemy_imperative(python, options): - return run_perf_script(python, options, "sqlalchemy_imperative") - - -def BM_mdp(python, options): - return run_perf_script(python, options, "mdp") - - -# End benchmarks, begin main entry point support. - -def get_benchmarks(): - bench_funcs = dict((name[3:].lower(), func) - for name, func in globals().items() - if name.startswith("BM_")) - - bench_groups = BENCH_GROUPS.copy() - - # create the 'all' group - bench_groups["all"] = sorted(bench_funcs) - - return (bench_funcs, bench_groups) - - -def expand_benchmark_name(bm_name, bench_groups): - """Recursively expand name benchmark names. - - Args: - bm_name: string naming a benchmark or benchmark group. - - Yields: - Names of actual benchmarks, with all group names fully expanded. 
- """ - expansion = bench_groups.get(bm_name) - if expansion: - for name in expansion: - for name in expand_benchmark_name(name, bench_groups): - yield name - else: - yield bm_name - - -def select_benchmarks(benchmarks, bench_groups): - legal_benchmarks = bench_groups["all"] - benchmarks = benchmarks.split(",") - positive_benchmarks = set(bm.lower() - for bm in benchmarks - if bm and not bm.startswith("-")) - negative_benchmarks = set(bm[1:].lower() - for bm in benchmarks - if bm and bm.startswith("-")) - - should_run = set() - if not positive_benchmarks: - should_run = set(expand_benchmark_name("default", bench_groups)) - - for name in positive_benchmarks: - for bm in expand_benchmark_name(name, bench_groups): - if bm not in legal_benchmarks: - logging.warning("No benchmark named %s", bm) - else: - should_run.add(bm) - - for bm in negative_benchmarks: - if bm in bench_groups: - raise ValueError("Negative groups not supported: -%s" % bm) - elif bm not in legal_benchmarks: - logging.warning("No benchmark named %s", bm) - else: - should_run.remove(bm) - return should_run diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 46d72361..3680c886 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -1,7 +1,10 @@ import argparse +import contextlib +import logging import os.path import sys +from pyperformance import _utils, is_installed from pyperformance.venv import exec_in_virtualenv, cmd_venv @@ -10,14 +13,17 @@ def comma_separated(values): return list(filter(None, values)) -def filter_opts(cmd): - cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default="default", +def filter_opts(cmd, *, allow_no_benchmarks=False): + cmd.add_argument("--manifest", help="benchmark manifest file to use") + + cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default='', help=("Comma-separated list of benchmarks to run. Can" " contain both positive and negative arguments:" " --benchmarks=run_this,also_this,-not_this. If" " there are no positive arguments, we'll run all" " benchmarks except the negative arguments. 
" " Otherwise we run only the positive arguments.")) + cmd.set_defaults(allow_no_benchmarks=allow_no_benchmarks) def parse_args(): @@ -89,6 +95,7 @@ def parse_args(): cmd = subparsers.add_parser( 'list_groups', help='List benchmark groups of the running Python') cmds.append(cmd) + cmd.add_argument("--manifest", help="benchmark manifest file to use") # compile cmd = subparsers.add_parser( @@ -131,9 +138,17 @@ def parse_args(): # venv cmd = subparsers.add_parser('venv', help='Actions on the virtual environment') - cmd.add_argument("venv_action", nargs="?", - choices=('show', 'create', 'recreate', 'remove'), - default='show') + cmd.set_defaults(venv_action='show') + venvsubs = cmd.add_subparsers(dest="venv_action") + cmd = venvsubs.add_parser('show') + cmds.append(cmd) + cmd = venvsubs.add_parser('create') + filter_opts(cmd, allow_no_benchmarks=True) + cmds.append(cmd) + cmd = venvsubs.add_parser('recreate') + filter_opts(cmd, allow_no_benchmarks=True) + cmds.append(cmd) + cmd = venvsubs.add_parser('remove') cmds.append(cmd) for cmd in cmds: @@ -170,18 +185,81 @@ def parse_args(): abs_python = os.path.abspath(options.python) if not abs_python: print("ERROR: Unable to locate the Python executable: %r" % - options.python) + options.python, flush=True) sys.exit(1) options.python = abs_python + if hasattr(options, 'benchmarks'): + if options.benchmarks == '': + if not options.allow_no_benchmarks: + parser.error('--benchmarks cannot be empty') + options.benchmarks = None + return (parser, options) +@contextlib.contextmanager +def _might_need_venv(options): + try: + yield + except ModuleNotFoundError: + if not options.inside_venv: + print('switching to a venv.', flush=True) + exec_in_virtualenv(options) + raise # re-raise + + +def _manifest_from_options(options): + from pyperformance import _manifest + return _manifest.load_manifest(options.manifest) + + +def _benchmarks_from_options(options): + if not getattr(options, 'benchmarks', None): + return None + manifest = _manifest_from_options(options) + return _select_benchmarks(options.benchmarks, manifest) + + +def _select_benchmarks(raw, manifest): + from pyperformance import _benchmark_selections + + # Get the raw list of benchmarks. + entries = raw.lower() + parse_entry = (lambda o, s: _benchmark_selections.parse_selection(s, op=o)) + parsed = _utils.parse_selections(entries, parse_entry) + parsed_infos = list(parsed) + + # Disallow negative groups. + for op, _, kind, parsed in parsed_infos: + if callable(parsed): + continue + name = parsed.name if kind == 'benchmark' else parsed + if name in manifest.groups and op == '-': + raise ValueError(f'negative groups not supported: -{parsed.name}') + + # Get the selections. 
+ selected = [] + for bench in _benchmark_selections.iter_selections(manifest, parsed_infos): + if isinstance(bench, str): + logging.warning(f"no benchmark named {bench!r}") + continue + selected.append(bench) + return selected + + def _main(): parser, options = parse_args() + if not is_installed(): + assert not options.inside_venv + print('switching to a venv.', flush=True) + exec_in_virtualenv(options) + if options.action == 'venv': - cmd_venv(options) + with _might_need_venv(options): + benchmarks = _benchmarks_from_options(options) + cmd_venv(options, benchmarks) sys.exit() elif options.action == 'compile': from pyperformance.compile import cmd_compile @@ -199,21 +277,25 @@ def _main(): from pyperformance.compare import cmd_show cmd_show(options) sys.exit() - - if not options.inside_venv: - exec_in_virtualenv(options) - - from pyperformance.cli_run import cmd_run, cmd_list, cmd_list_groups - - if options.action == 'run': - cmd_run(parser, options) + elif options.action == 'run': + with _might_need_venv(options): + from pyperformance.cli_run import cmd_run + benchmarks = _benchmarks_from_options(options) + cmd_run(options, benchmarks) elif options.action == 'compare': - from pyperformance.compare import cmd_compare + with _might_need_venv(options): + from pyperformance.compare import cmd_compare cmd_compare(options) elif options.action == 'list': - cmd_list(options) + with _might_need_venv(options): + from pyperformance.cli_run import cmd_list + benchmarks = _benchmarks_from_options(options) + cmd_list(options, benchmarks) elif options.action == 'list_groups': - cmd_list_groups(options) + with _might_need_venv(options): + from pyperformance.cli_run import cmd_list_groups + manifest = _manifest_from_options(options) + cmd_list_groups(manifest) else: parser.print_help() sys.exit(1) @@ -223,5 +305,5 @@ def main(): try: _main() except KeyboardInterrupt: - print("Benchmark suite interrupted: exit!") + print("Benchmark suite interrupted: exit!", flush=True) sys.exit(1) diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index a760c877..cc317160 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -5,18 +5,11 @@ import pyperf import pyperformance -from pyperformance.benchmarks import get_benchmarks, select_benchmarks from pyperformance.compare import display_benchmark_suite from pyperformance.run import run_benchmarks -def get_benchmarks_to_run(options): - bench_funcs, bench_groups = get_benchmarks() - should_run = select_benchmarks(options.benchmarks, bench_groups) - return (bench_funcs, bench_groups, should_run) - - -def cmd_run(parser, options): +def cmd_run(options, benchmarks): logging.basicConfig(level=logging.INFO) print("Python benchmark suite %s" % pyperformance.__version__) @@ -33,9 +26,8 @@ def cmd_run(parser, options): if not os.path.isabs(executable): print("ERROR: \"%s\" is not an absolute path" % executable) sys.exit(1) - bench_funcs, bench_groups, should_run = get_benchmarks_to_run(options) - cmd_prefix = [executable] - suite, errors = run_benchmarks(bench_funcs, should_run, cmd_prefix, options) + + suite, errors = run_benchmarks(benchmarks, executable, options) if not suite: print("ERROR: No benchmark was run") @@ -55,29 +47,27 @@ def cmd_run(parser, options): sys.exit(1) -def cmd_list(options): - bench_funcs, bench_groups, all_funcs = get_benchmarks_to_run(options) - +def cmd_list(options, benchmarks): print("%r benchmarks:" % options.benchmarks) - for func in sorted(all_funcs): - print("- %s" % func) + for bench in sorted(benchmarks): + 
print("- %s" % bench.name) print() - print("Total: %s benchmarks" % len(all_funcs)) - + print("Total: %s benchmarks" % len(benchmarks)) -def cmd_list_groups(options): - bench_funcs, bench_groups = get_benchmarks() - funcs = set(bench_groups['all']) - all_funcs = set(funcs) +def cmd_list_groups(manifest): + all_benchmarks = set(manifest.benchmarks) - for group, funcs in sorted(bench_groups.items()): - funcs = set(funcs) & all_funcs - if not funcs: + groups = sorted(manifest.groups - {'all', 'default'}) + groups[0:0] = ['all', 'default'] + for group in groups: + specs = list(manifest.resolve_group(group)) + known = set(specs) & all_benchmarks + if not known: # skip empty groups continue - print("%s (%s):" % (group, len(funcs))) - for func in sorted(funcs): - print("- %s" % func) + print("%s (%s):" % (group, len(specs))) + for spec in sorted(specs): + print("- %s" % spec.name) print() diff --git a/pyperformance/compile.py b/pyperformance/compile.py index 30d2fd90..70041c6d 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -18,7 +18,7 @@ from urllib.request import urlopen import pyperformance -from pyperformance.utils import MS_WINDOWS +from pyperformance._utils import MS_WINDOWS from pyperformance.venv import (GET_PIP_URL, REQ_OLD_PIP, PERFORMANCE_ROOT, download, is_build_dir) @@ -237,6 +237,29 @@ def safe_makedirs(self, directory): raise +def resolve_python(prefix, builddir, *, fallback=True): + if sys.platform in ('darwin', 'win32'): + program_ext = '.exe' + else: + program_ext = '' + + if prefix: + if sys.platform == 'darwin': + program_ext = '' + program = os.path.join(prefix, "bin", "python3" + program_ext) + exists = os.path.exists(program) + if not exists and fallback: + program2 = os.path.join(prefix, "bin", "python" + program_ext) + if os.path.exists(program2): + program = program2 + exists = True + else: + assert builddir + program = os.path.join(builddir, "python" + program_ext) + exists = os.path.exists(program) + return program, exists + + class Python(Task): def __init__(self, app, conf): super().__init__(app, conf.build_dir) @@ -288,28 +311,19 @@ def compile(self): self.run('make') def install_python(self): - if sys.platform in ('darwin', 'win32'): - program_ext = '.exe' - else: - program_ext = '' - + program, _ = resolve_python( + self.conf.prefix if self.conf.install else None, + self.conf.build_dir, + ) if self.conf.install: - prefix = self.conf.prefix - self.app.safe_rmdir(prefix) - self.app.safe_makedirs(prefix) - + program, _ = resolve_python(self.conf.prefix, self.conf.build_dir) + self.app.safe_rmdir(self.conf.prefix) + self.app.safe_makedirs(self.conf.prefix) self.run('make', 'install') - - if sys.platform == 'darwin': - program_ext = '' - - self.program = os.path.join(prefix, "bin", "python" + program_ext) - if not os.path.exists(self.program): - self.program = os.path.join(prefix, "bin", "python3" + program_ext) else: - # don't install: run python from the compilation directory - self.program = os.path.join(self.conf.build_dir, - "python" + program_ext) + program, _ = resolve_python(None, self.conf.build_dir) + # else don't install: run python from the compilation directory + self.program = program def get_version(self): # Dump the Python version @@ -411,6 +425,9 @@ def compile_install(self): class BenchmarkRevision(Application): + + _dryrun = False + def __init__(self, conf, revision, branch=None, patch=None, setup_log=True, filename=None, commit_date=None, options=None): @@ -496,8 +513,16 @@ def compile_install(self): def create_venv(self): # 
Create venv - cmd = [self.python.program, '-u', '-m', 'pyperformance', - 'venv', 'recreate'] + python = self.python.program + if self._dryrun: + program, exists = resolve_python( + self.conf.prefix if self.conf.install else None, + self.conf.build_dir, + ) + if not python or not exists: + python = sys.executable + cmd = [python, '-u', '-m', 'pyperformance', 'venv', 'recreate', + '--benchmarks', ''] if self.conf.venv: cmd.extend(('--venv', self.conf.venv)) if self.options.inherit_environ: @@ -508,13 +533,18 @@ def create_venv(self): def run_benchmark(self): self.safe_makedirs(os.path.dirname(self.filename)) - cmd = [self.python.program, '-u', + python = self.python.program + if self._dryrun: + python = sys.executable + cmd = [python, '-u', '-m', 'pyperformance', 'run', '--verbose', '--output', self.filename] if self.options.inherit_environ: cmd.append('--inherit-environ=%s' % ','.join(self.options.inherit_environ)) + if self.conf.manifest: + cmd.extend(('--manifest', self.conf.manifest)) if self.conf.benchmarks: cmd.append('--benchmarks=%s' % self.conf.benchmarks) if self.conf.affinity: @@ -673,10 +703,11 @@ def prepare(self): def compile_bench(self): self.python = Python(self, self.conf) - try: - self.compile_install() - except SystemExit: - sys.exit(EXIT_COMPILE_ERROR) + if not self._dryrun: + try: + self.compile_install() + except SystemExit: + sys.exit(EXIT_COMPILE_ERROR) self.create_venv() @@ -742,6 +773,13 @@ def getstr(section, key, default=None): # strip spaces return value.strip() + def getfile(section, key, default=None): + value = getstr(section, key, default) + if not value: + return value + value = os.path.expanduser(value) + return value + def getboolean(section, key, default): try: sectionobj = cfgobj[section] @@ -750,19 +788,19 @@ def getboolean(section, key, default): return default # [config] - conf.json_dir = os.path.expanduser(getstr('config', 'json_dir')) + conf.json_dir = getfile('config', 'json_dir') conf.json_patch_dir = os.path.join(conf.json_dir, 'patch') conf.uploaded_json_dir = os.path.join(conf.json_dir, 'uploaded') conf.debug = getboolean('config', 'debug', False) if parse_compile: # [scm] - conf.repo_dir = os.path.expanduser(getstr('scm', 'repo_dir')) + conf.repo_dir = getfile('scm', 'repo_dir') conf.update = getboolean('scm', 'update', True) conf.git_remote = getstr('config', 'git_remote', default='remotes/origin') # [compile] - conf.directory = os.path.expanduser(getstr('compile', 'bench_dir')) + conf.directory = getfile('compile', 'bench_dir') conf.lto = getboolean('compile', 'lto', True) conf.pgo = getboolean('compile', 'pgo', True) conf.install = getboolean('compile', 'install', True) @@ -770,6 +808,7 @@ def getboolean(section, key, default): # [run_benchmark] conf.system_tune = getboolean('run_benchmark', 'system_tune', True) + conf.manifest = getfile('run_benchmark', 'manifest') conf.benchmarks = getstr('run_benchmark', 'benchmarks', default='') conf.affinity = getstr('run_benchmark', 'affinity', default='') conf.upload = getboolean('run_benchmark', 'upload', False) @@ -953,6 +992,8 @@ def cmd_compile(options): conf.update = False if options.no_tune: conf.system_tune = False + if options.venv: + conf.venv = options.venv bench = BenchmarkRevision(conf, options.revision, options.branch, patch=options.patch, options=options) bench.main() diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST new file mode 100644 index 00000000..a52cb7a1 --- /dev/null +++ b/pyperformance/data-files/benchmarks/MANIFEST @@ 
-0,0 +1,68 @@ +[benchmarks] + +name metafile +2to3 +chameleon +chaos +crypto_pyaes +deltablue +django_template +dulwich_log +fannkuch +float +genshi +go +hexiom +# FIXME: this benchmark fails with: +# Unable to get the program 'hg' from the virtual environment +#hg_startup +html5lib +json_dumps +json_loads +logging +mako +mdp +meteor_contest +nbody +nqueens +pathlib +pickle +pickle_dict +pickle_list +pickle_pure_python +pidigits +pyflate +python_startup +python_startup_no_site +raytrace +regex_compile +regex_dna +regex_effbot +regex_v8 +richards +scimark +spectral_norm +sqlalchemy_declarative +sqlalchemy_imperative +sqlite_synth +sympy +telco +tornado_http +unpack_sequence +unpickle +unpickle_list +unpickle_pure_python +xml_etree + + +#[groups] +#startup +#regex +#serialize +#apps +#math +#template + + +[group default] +-mdp diff --git a/pyperformance/benchmarks/data/2to3/README.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/README.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/README.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/README.txt diff --git a/pyperformance/benchmarks/data/2to3/__init__.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/__init__.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/__init__.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/__init__.py.txt diff --git a/pyperformance/benchmarks/data/2to3/context_processors.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/context_processors.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/context_processors.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/context_processors.py.txt diff --git a/pyperformance/benchmarks/data/2to3/exceptions.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/exceptions.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/exceptions.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/exceptions.py.txt diff --git a/pyperformance/benchmarks/data/2to3/mail.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/mail.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/mail.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/mail.py.txt diff --git a/pyperformance/benchmarks/data/2to3/paginator.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/paginator.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/paginator.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/paginator.py.txt diff --git a/pyperformance/benchmarks/data/2to3/signals.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/signals.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/signals.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/signals.py.txt diff --git a/pyperformance/benchmarks/data/2to3/template_loader.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/template_loader.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/template_loader.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/template_loader.py.txt diff --git a/pyperformance/benchmarks/data/2to3/urlresolvers.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/urlresolvers.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/urlresolvers.py.txt rename to 
pyperformance/data-files/benchmarks/bm_2to3/data/2to3/urlresolvers.py.txt diff --git a/pyperformance/benchmarks/data/2to3/xheaders.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/xheaders.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/xheaders.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/xheaders.py.txt diff --git a/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml b/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml new file mode 100644 index 00000000..f35eb568 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_2to3" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "2to3" +tags = "apps" diff --git a/pyperformance/benchmarks/bm_2to3.py b/pyperformance/data-files/benchmarks/bm_2to3/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_2to3.py rename to pyperformance/data-files/benchmarks/bm_2to3/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml b/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml new file mode 100644 index 00000000..b9dbd16d --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "pyperformance_bm_chameleon" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "Chameleon", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "chameleon" +tags = "apps" diff --git a/pyperformance/data-files/benchmarks/bm_chameleon/requirements.txt b/pyperformance/data-files/benchmarks/bm_chameleon/requirements.txt new file mode 100644 index 00000000..260c3bc4 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_chameleon/requirements.txt @@ -0,0 +1 @@ +chameleon==3.9.1 diff --git a/pyperformance/benchmarks/bm_chameleon.py b/pyperformance/data-files/benchmarks/bm_chameleon/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_chameleon.py rename to pyperformance/data-files/benchmarks/bm_chameleon/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml b/pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml new file mode 100644 index 00000000..7ba09715 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_chaos" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "chaos" diff --git a/pyperformance/benchmarks/bm_chaos.py b/pyperformance/data-files/benchmarks/bm_chaos/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_chaos.py rename to pyperformance/data-files/benchmarks/bm_chaos/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml new file mode 100644 index 00000000..cc97eff4 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml @@ -0,0 +1,12 @@ +[project] +name = "pyperformance_bm_crypto_pyaes" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "pyaes", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] 
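+# (Note: the concrete pyaes version used at run time is pinned in this
+# benchmark's requirements.txt, added below.)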
+ +[tool.pyperformance] +name = "crypto_pyaes" diff --git a/pyperformance/data-files/benchmarks/bm_crypto_pyaes/requirements.txt b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/requirements.txt new file mode 100644 index 00000000..68abeee5 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/requirements.txt @@ -0,0 +1 @@ +pyaes==1.6.1 diff --git a/pyperformance/benchmarks/bm_crypto_pyaes.py b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_crypto_pyaes.py rename to pyperformance/data-files/benchmarks/bm_crypto_pyaes/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml b/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml new file mode 100644 index 00000000..2345a57e --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_deltablue" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "deltablue" diff --git a/pyperformance/benchmarks/bm_deltablue.py b/pyperformance/data-files/benchmarks/bm_deltablue/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_deltablue.py rename to pyperformance/data-files/benchmarks/bm_deltablue/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml new file mode 100644 index 00000000..0b66d9d0 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "pyperformance_bm_django_template" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "django", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "django_template" +tags = "template" diff --git a/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt new file mode 100644 index 00000000..4a3490bf --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt @@ -0,0 +1,4 @@ +asgiref==3.3.4 +django==3.2.4 +pytz==2021.1 +sqlparse==0.4.1 diff --git a/pyperformance/benchmarks/bm_django_template.py b/pyperformance/data-files/benchmarks/bm_django_template/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_django_template.py rename to pyperformance/data-files/benchmarks/bm_django_template/run_benchmark.py diff --git a/pyperformance/benchmarks/data/asyncio.git/COMMIT_EDITMSG b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/COMMIT_EDITMSG rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG diff --git a/pyperformance/benchmarks/data/asyncio.git/FETCH_HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/FETCH_HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/HEAD similarity index 100% rename from 
pyperformance/benchmarks/data/asyncio.git/HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/ORIG_HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/ORIG_HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/config b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/config similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/config rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/config diff --git a/pyperformance/benchmarks/data/asyncio.git/description b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/description similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/description rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/description diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/applypatch-msg.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/applypatch-msg.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/commit-msg.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/commit-msg.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/post-update.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/post-update.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/pre-applypatch.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/pre-applypatch.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/pre-commit.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/pre-commit.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/pre-push.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/pre-push.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/pre-rebase.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample similarity index 
100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/pre-rebase.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/prepare-commit-msg.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/prepare-commit-msg.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/update.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/update.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/update.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/update.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/index b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/index similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/index rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/index diff --git a/pyperformance/benchmarks/data/asyncio.git/info/exclude b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/info/exclude rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude diff --git a/pyperformance/benchmarks/data/asyncio.git/info/refs b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/refs similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/info/refs rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/refs diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/heads/master b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/heads/master rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/bind_modules b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/bind_modules rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/master 
b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/master rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/zero_timeout b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/zero_timeout rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout diff --git a/pyperformance/benchmarks/data/asyncio.git/objects/info/packs b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/objects/info/packs rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs diff --git a/pyperformance/benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx diff --git a/pyperformance/benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack diff --git a/pyperformance/benchmarks/data/asyncio.git/packed-refs b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/packed-refs rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs diff --git a/pyperformance/benchmarks/data/asyncio.git/refs/remotes/origin/HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/refs/remotes/origin/HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD diff --git a/pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml b/pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml new file mode 100644 index 00000000..ac4df546 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml @@ -0,0 +1,12 @@ +[project] +name = "pyperformance_bm_dulwich_log" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "dulwich", # optional? 
+] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "dulwich_log" diff --git a/pyperformance/data-files/benchmarks/bm_dulwich_log/requirements.txt b/pyperformance/data-files/benchmarks/bm_dulwich_log/requirements.txt new file mode 100644 index 00000000..0adbfb46 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_dulwich_log/requirements.txt @@ -0,0 +1,3 @@ +certifi==2021.5.30 +dulwich==0.20.23 +urllib3==1.26.5 diff --git a/pyperformance/benchmarks/bm_dulwich_log.py b/pyperformance/data-files/benchmarks/bm_dulwich_log/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_dulwich_log.py rename to pyperformance/data-files/benchmarks/bm_dulwich_log/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml b/pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml new file mode 100644 index 00000000..0a13e04c --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_fannkuch" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "fannkuch" diff --git a/pyperformance/benchmarks/bm_fannkuch.py b/pyperformance/data-files/benchmarks/bm_fannkuch/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_fannkuch.py rename to pyperformance/data-files/benchmarks/bm_fannkuch/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_float/pyproject.toml b/pyperformance/data-files/benchmarks/bm_float/pyproject.toml new file mode 100644 index 00000000..fd0133a7 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_float/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_float" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "float" +tags = "math" diff --git a/pyperformance/benchmarks/bm_float.py b/pyperformance/data-files/benchmarks/bm_float/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_float.py rename to pyperformance/data-files/benchmarks/bm_float/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml b/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml new file mode 100644 index 00000000..fbba40df --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "pyperformance_bm_genshi" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "Genshi", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "genshi" +tags = "template" diff --git a/pyperformance/data-files/benchmarks/bm_genshi/requirements.txt b/pyperformance/data-files/benchmarks/bm_genshi/requirements.txt new file mode 100644 index 00000000..c2444cb6 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_genshi/requirements.txt @@ -0,0 +1,2 @@ +genshi==0.7.5 +six==1.16.0 diff --git a/pyperformance/benchmarks/bm_genshi.py b/pyperformance/data-files/benchmarks/bm_genshi/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_genshi.py rename to pyperformance/data-files/benchmarks/bm_genshi/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_go/pyproject.toml 
b/pyperformance/data-files/benchmarks/bm_go/pyproject.toml new file mode 100644 index 00000000..a4abcf90 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_go/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_go" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "go" diff --git a/pyperformance/benchmarks/bm_go.py b/pyperformance/data-files/benchmarks/bm_go/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_go.py rename to pyperformance/data-files/benchmarks/bm_go/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml b/pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml new file mode 100644 index 00000000..bb66e1f5 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_hexiom" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "hexiom" diff --git a/pyperformance/benchmarks/bm_hexiom.py b/pyperformance/data-files/benchmarks/bm_hexiom/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_hexiom.py rename to pyperformance/data-files/benchmarks/bm_hexiom/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml new file mode 100644 index 00000000..74d67f3e --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "pyperformance_bm_hg_startup" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "mercurial", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "hg_startup" +tags = "startup" diff --git a/pyperformance/data-files/benchmarks/bm_hg_startup/requirements.txt b/pyperformance/data-files/benchmarks/bm_hg_startup/requirements.txt new file mode 100644 index 00000000..7b54aaa7 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_hg_startup/requirements.txt @@ -0,0 +1 @@ +mercurial==5.8 diff --git a/pyperformance/benchmarks/bm_hg_startup.py b/pyperformance/data-files/benchmarks/bm_hg_startup/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_hg_startup.py rename to pyperformance/data-files/benchmarks/bm_hg_startup/run_benchmark.py diff --git a/pyperformance/benchmarks/data/w3_tr_html5.html b/pyperformance/data-files/benchmarks/bm_html5lib/data/w3_tr_html5.html similarity index 100% rename from pyperformance/benchmarks/data/w3_tr_html5.html rename to pyperformance/data-files/benchmarks/bm_html5lib/data/w3_tr_html5.html diff --git a/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml b/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml new file mode 100644 index 00000000..3bd96abd --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "pyperformance_bm_html5lib" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "html5lib", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "html5lib" +tags = "apps" diff --git a/pyperformance/data-files/benchmarks/bm_html5lib/requirements.txt 
b/pyperformance/data-files/benchmarks/bm_html5lib/requirements.txt new file mode 100644 index 00000000..937d99a9 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_html5lib/requirements.txt @@ -0,0 +1,3 @@ +html5lib==1.1 +six==1.16.0 +webencodings==0.5.1 diff --git a/pyperformance/benchmarks/bm_html5lib.py b/pyperformance/data-files/benchmarks/bm_html5lib/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_html5lib.py rename to pyperformance/data-files/benchmarks/bm_html5lib/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml b/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml new file mode 100644 index 00000000..b292fcd7 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_json_dumps" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "json_dumps" +tags = "serialize" diff --git a/pyperformance/benchmarks/bm_json_dumps.py b/pyperformance/data-files/benchmarks/bm_json_dumps/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_json_dumps.py rename to pyperformance/data-files/benchmarks/bm_json_dumps/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml b/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml new file mode 100644 index 00000000..18c73fda --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_json_loads" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "json_loads" +tags = "serialize" diff --git a/pyperformance/benchmarks/bm_json_loads.py b/pyperformance/data-files/benchmarks/bm_json_loads/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_json_loads.py rename to pyperformance/data-files/benchmarks/bm_json_loads/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_logging/pyproject.toml b/pyperformance/data-files/benchmarks/bm_logging/pyproject.toml new file mode 100644 index 00000000..7b2d0878 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_logging/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_logging" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "logging" diff --git a/pyperformance/benchmarks/bm_logging.py b/pyperformance/data-files/benchmarks/bm_logging/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_logging.py rename to pyperformance/data-files/benchmarks/bm_logging/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml b/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml new file mode 100644 index 00000000..80e1abce --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "pyperformance_bm_mako" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "Mako", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "mako" +tags = "template" diff --git 
a/pyperformance/data-files/benchmarks/bm_mako/requirements.txt b/pyperformance/data-files/benchmarks/bm_mako/requirements.txt new file mode 100644 index 00000000..5f2fe892 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_mako/requirements.txt @@ -0,0 +1,2 @@ +mako==1.1.4 +markupsafe==2.0.1 diff --git a/pyperformance/benchmarks/bm_mako.py b/pyperformance/data-files/benchmarks/bm_mako/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_mako.py rename to pyperformance/data-files/benchmarks/bm_mako/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml b/pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml new file mode 100644 index 00000000..176ab715 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_mdp" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "mdp" diff --git a/pyperformance/benchmarks/bm_mdp.py b/pyperformance/data-files/benchmarks/bm_mdp/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_mdp.py rename to pyperformance/data-files/benchmarks/bm_mdp/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml b/pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml new file mode 100644 index 00000000..3196e0e1 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_meteor_contest" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "meteor_contest" diff --git a/pyperformance/benchmarks/bm_meteor_contest.py b/pyperformance/data-files/benchmarks/bm_meteor_contest/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_meteor_contest.py rename to pyperformance/data-files/benchmarks/bm_meteor_contest/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml b/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml new file mode 100644 index 00000000..546c300a --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_nbody" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "nbody" +tags = "math" diff --git a/pyperformance/benchmarks/bm_nbody.py b/pyperformance/data-files/benchmarks/bm_nbody/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_nbody.py rename to pyperformance/data-files/benchmarks/bm_nbody/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml b/pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml new file mode 100644 index 00000000..93ad084f --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_nqueens" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "nqueens" diff --git a/pyperformance/benchmarks/bm_nqueens.py b/pyperformance/data-files/benchmarks/bm_nqueens/run_benchmark.py similarity 
index 100% rename from pyperformance/benchmarks/bm_nqueens.py rename to pyperformance/data-files/benchmarks/bm_nqueens/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml new file mode 100644 index 00000000..caa4bb1a --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_pathlib" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "pathlib" diff --git a/pyperformance/benchmarks/bm_pathlib.py b/pyperformance/data-files/benchmarks/bm_pathlib/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_pathlib.py rename to pyperformance/data-files/benchmarks/bm_pathlib/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml new file mode 100644 index 00000000..2a87c920 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml @@ -0,0 +1,11 @@ +[project] +name = "pyperformance_bm_pickle_dict" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "pickle_dict" +tags = "serialize" +extra_opts = ["pickle_dict"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml new file mode 100644 index 00000000..ab8a3618 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml @@ -0,0 +1,11 @@ +[project] +name = "pyperformance_bm_pickle_list" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "pickle_list" +tags = "serialize" +extra_opts = ["pickle_list"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml new file mode 100644 index 00000000..94288918 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml @@ -0,0 +1,11 @@ +[project] +name = "pyperformance_bm_pickle_pure_python" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "pickle_pure_python" +tags = "serialize" +extra_opts = ["--pure-python", "pickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml new file mode 100644 index 00000000..959609d0 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml @@ -0,0 +1,11 @@ +[project] +name = "pyperformance_bm_unpickle" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "unpickle" +tags = "serialize" +extra_opts = ["unpickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml new file mode 100644 index 00000000..b5eb4da3 --- /dev/null +++ 
b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml @@ -0,0 +1,11 @@ +[project] +name = "pyperformance_bm_unpickle_list" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "unpickle_list" +tags = "serialize" +extra_opts = ["unpickle_list"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml new file mode 100644 index 00000000..af1a2e7c --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml @@ -0,0 +1,11 @@ +[project] +name = "pyperformance_bm_unpickle_pure_python" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "unpickle_pure_python" +tags = "serialize" +extra_opts = ["--pure-python", "unpickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml new file mode 100644 index 00000000..87bc6ab6 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml @@ -0,0 +1,11 @@ +[project] +name = "pyperformance_bm_pickle" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "pickle" +tags = "serialize" +extra_opts = ["pickle"] diff --git a/pyperformance/benchmarks/bm_pickle.py b/pyperformance/data-files/benchmarks/bm_pickle/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_pickle.py rename to pyperformance/data-files/benchmarks/bm_pickle/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml new file mode 100644 index 00000000..8fef04a5 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_pidigits" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "pidigits" +tags = "math" diff --git a/pyperformance/benchmarks/bm_pidigits.py b/pyperformance/data-files/benchmarks/bm_pidigits/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_pidigits.py rename to pyperformance/data-files/benchmarks/bm_pidigits/run_benchmark.py diff --git a/pyperformance/benchmarks/data/interpreter.tar.bz2 b/pyperformance/data-files/benchmarks/bm_pyflate/data/interpreter.tar.bz2 similarity index 100% rename from pyperformance/benchmarks/data/interpreter.tar.bz2 rename to pyperformance/data-files/benchmarks/bm_pyflate/data/interpreter.tar.bz2 diff --git a/pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml new file mode 100644 index 00000000..e1c0a7ff --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_pyflate" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "pyflate" diff --git a/pyperformance/benchmarks/bm_pyflate.py 
b/pyperformance/data-files/benchmarks/bm_pyflate/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_pyflate.py rename to pyperformance/data-files/benchmarks/bm_pyflate/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml b/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml new file mode 100644 index 00000000..13dd29f0 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml @@ -0,0 +1,11 @@ +[project] +name = "pyperformance_bm_python_startup_no_site" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "python_startup_no_site" +extra_opts = ["--no-site"] +tags = "startup" diff --git a/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml new file mode 100644 index 00000000..1e55ace1 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_python_startup" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "python_startup" +tags = "startup" diff --git a/pyperformance/benchmarks/bm_python_startup.py b/pyperformance/data-files/benchmarks/bm_python_startup/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_python_startup.py rename to pyperformance/data-files/benchmarks/bm_python_startup/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml b/pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml new file mode 100644 index 00000000..d9ca5ab5 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_raytrace" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "raytrace" diff --git a/pyperformance/benchmarks/bm_raytrace.py b/pyperformance/data-files/benchmarks/bm_raytrace/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_raytrace.py rename to pyperformance/data-files/benchmarks/bm_raytrace/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_effbot.py b/pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_effbot.py new file mode 120000 index 00000000..99624945 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_effbot.py @@ -0,0 +1 @@ +../bm_regex_effbot/run_benchmark.py \ No newline at end of file diff --git a/pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_v8.py b/pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_v8.py new file mode 120000 index 00000000..fe5f5259 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_v8.py @@ -0,0 +1 @@ +../bm_regex_v8/run_benchmark.py \ No newline at end of file diff --git a/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml new file mode 100644 index 00000000..386df534 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml @@ -0,0 +1,10 
@@ +[project] +name = "pyperformance_bm_regex_compile" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "regex_compile" +tags = "regex" diff --git a/pyperformance/benchmarks/bm_regex_compile.py b/pyperformance/data-files/benchmarks/bm_regex_compile/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_regex_compile.py rename to pyperformance/data-files/benchmarks/bm_regex_compile/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml new file mode 100644 index 00000000..33a40ca2 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_regex_dna" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "regex_dna" +tags = "regex" diff --git a/pyperformance/benchmarks/bm_regex_dna.py b/pyperformance/data-files/benchmarks/bm_regex_dna/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_regex_dna.py rename to pyperformance/data-files/benchmarks/bm_regex_dna/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml new file mode 100644 index 00000000..3d64e118 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_regex_effbot" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "regex_effbot" +tags = "regex" diff --git a/pyperformance/benchmarks/bm_regex_effbot.py b/pyperformance/data-files/benchmarks/bm_regex_effbot/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_regex_effbot.py rename to pyperformance/data-files/benchmarks/bm_regex_effbot/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml new file mode 100644 index 00000000..86f70bb9 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_regex_v8" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "regex_v8" +tags = "regex" diff --git a/pyperformance/benchmarks/bm_regex_v8.py b/pyperformance/data-files/benchmarks/bm_regex_v8/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_regex_v8.py rename to pyperformance/data-files/benchmarks/bm_regex_v8/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_richards/pyproject.toml b/pyperformance/data-files/benchmarks/bm_richards/pyproject.toml new file mode 100644 index 00000000..aa464ddb --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_richards/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_richards" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "richards" diff --git 
a/pyperformance/benchmarks/bm_richards.py b/pyperformance/data-files/benchmarks/bm_richards/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_richards.py rename to pyperformance/data-files/benchmarks/bm_richards/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml b/pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml new file mode 100644 index 00000000..8b2f2ca1 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_scimark" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "scimark" diff --git a/pyperformance/benchmarks/bm_scimark.py b/pyperformance/data-files/benchmarks/bm_scimark/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_scimark.py rename to pyperformance/data-files/benchmarks/bm_scimark/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml b/pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml new file mode 100644 index 00000000..8ebdddb3 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_spectral_norm" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "spectral_norm" diff --git a/pyperformance/benchmarks/bm_spectral_norm.py b/pyperformance/data-files/benchmarks/bm_spectral_norm/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_spectral_norm.py rename to pyperformance/data-files/benchmarks/bm_spectral_norm/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml new file mode 100644 index 00000000..3c1cc775 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml @@ -0,0 +1,12 @@ +[project] +name = "pyperformance_bm_sqlalchemy_declarative" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "SQLAlchemy", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "sqlalchemy_declarative" diff --git a/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/requirements.txt b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/requirements.txt new file mode 100644 index 00000000..b1f2cb01 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/requirements.txt @@ -0,0 +1,2 @@ +greenlet==1.1.0 +sqlalchemy==1.4.19 diff --git a/pyperformance/benchmarks/bm_sqlalchemy_declarative.py b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_sqlalchemy_declarative.py rename to pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml new file mode 100644 index 00000000..1b9f75db --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml @@ -0,0 +1,12 @@ +[project] +name = 
"pyperformance_bm_sqlalchemy_imperative" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "SQLAlchemy", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "sqlalchemy_imperative" diff --git a/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/requirements.txt b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/requirements.txt new file mode 100644 index 00000000..b1f2cb01 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/requirements.txt @@ -0,0 +1,2 @@ +greenlet==1.1.0 +sqlalchemy==1.4.19 diff --git a/pyperformance/benchmarks/bm_sqlalchemy_imperative.py b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_sqlalchemy_imperative.py rename to pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml new file mode 100644 index 00000000..f406d737 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_sqlite_synth" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "sqlite_synth" diff --git a/pyperformance/benchmarks/bm_sqlite_synth.py b/pyperformance/data-files/benchmarks/bm_sqlite_synth/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_sqlite_synth.py rename to pyperformance/data-files/benchmarks/bm_sqlite_synth/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml new file mode 100644 index 00000000..bcd03442 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml @@ -0,0 +1,12 @@ +[project] +name = "pyperformance_bm_sympy" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "sympy", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "sympy" diff --git a/pyperformance/data-files/benchmarks/bm_sympy/requirements.txt b/pyperformance/data-files/benchmarks/bm_sympy/requirements.txt new file mode 100644 index 00000000..652d404d --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_sympy/requirements.txt @@ -0,0 +1,2 @@ +mpmath==1.2.1 +sympy==1.8 diff --git a/pyperformance/benchmarks/bm_sympy.py b/pyperformance/data-files/benchmarks/bm_sympy/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_sympy.py rename to pyperformance/data-files/benchmarks/bm_sympy/run_benchmark.py diff --git a/pyperformance/benchmarks/data/telco-bench.b b/pyperformance/data-files/benchmarks/bm_telco/data/telco-bench.b similarity index 100% rename from pyperformance/benchmarks/data/telco-bench.b rename to pyperformance/data-files/benchmarks/bm_telco/data/telco-bench.b diff --git a/pyperformance/data-files/benchmarks/bm_telco/pyproject.toml b/pyperformance/data-files/benchmarks/bm_telco/pyproject.toml new file mode 100644 index 00000000..194aa095 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_telco/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_telco" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = 
"https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "telco" diff --git a/pyperformance/benchmarks/bm_telco.py b/pyperformance/data-files/benchmarks/bm_telco/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_telco.py rename to pyperformance/data-files/benchmarks/bm_telco/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml b/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml new file mode 100644 index 00000000..c165b4fb --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "pyperformance_bm_tornado_http" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "tornado", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "tornado_http" +tags = "apps" diff --git a/pyperformance/data-files/benchmarks/bm_tornado_http/requirements.txt b/pyperformance/data-files/benchmarks/bm_tornado_http/requirements.txt new file mode 100644 index 00000000..ca2eb1c6 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_tornado_http/requirements.txt @@ -0,0 +1 @@ +tornado==6.1 diff --git a/pyperformance/benchmarks/bm_tornado_http.py b/pyperformance/data-files/benchmarks/bm_tornado_http/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_tornado_http.py rename to pyperformance/data-files/benchmarks/bm_tornado_http/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml b/pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml new file mode 100644 index 00000000..ddbd2559 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_unpack_sequence" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "unpack_sequence" diff --git a/pyperformance/benchmarks/bm_unpack_sequence.py b/pyperformance/data-files/benchmarks/bm_unpack_sequence/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_unpack_sequence.py rename to pyperformance/data-files/benchmarks/bm_unpack_sequence/run_benchmark.py diff --git a/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml b/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml new file mode 100644 index 00000000..21feb611 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml @@ -0,0 +1,10 @@ +[project] +name = "pyperformance_bm_xml_etree" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "xml_etree" +tags = "serialize" diff --git a/pyperformance/benchmarks/bm_xml_etree.py b/pyperformance/data-files/benchmarks/bm_xml_etree/run_benchmark.py similarity index 100% rename from pyperformance/benchmarks/bm_xml_etree.py rename to pyperformance/data-files/benchmarks/bm_xml_etree/run_benchmark.py diff --git a/pyperformance/data-files/requirements.txt b/pyperformance/data-files/requirements.txt new file mode 100644 index 00000000..cea4f9f7 --- /dev/null +++ b/pyperformance/data-files/requirements.txt @@ -0,0 +1,16 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile pyperformance/requirements.in +# 
+packaging==21.0 + # via -r pyperformance/requirements.in +psutil==5.8.0 + # via -r pyperformance/requirements.in +pyparsing==2.4.7 + # via packaging +pyperf==2.2.0 + # via -r pyperformance/requirements.in +toml==0.10.2 + # via -r pyperformance/requirements.in diff --git a/pyperformance/requirements.in b/pyperformance/requirements.in deleted file mode 100644 index 3fbadf60..00000000 --- a/pyperformance/requirements.in +++ /dev/null @@ -1,34 +0,0 @@ -# pyperformance dependencies -# -------------------------- - -pyperf - - -# Benchmarks dependencies -# ----------------------- -# -# When one of these dependencies is upgraded, the pyperformance major version -# should be increased to respect semantic versionning. Comparison between -# two pyperformance results of two different major versions is not reliable. - -Chameleon # bm_chameleon -Django # bm_django_template -# FIXME: reenable genshi -# Genshi # bm_genshi -Mako # bm_mako -SQLAlchemy # bm_sqlalchemy_declarative -# FIXME: reenable hg_startup -# mercurial # bm_hg_startup -html5lib # bm_html5lib -pyaes # bm_crypto_pyaes -sympy # bm_sympy -tornado # bm_tornado_http - - -# Optional dependencies -# --------------------- -# -# The list of optional dependencies is hardcoded in pyperformance/venv.py - -psutil -dulwich # bm_dulwich_log diff --git a/pyperformance/requirements.txt b/pyperformance/requirements.txt deleted file mode 100644 index cd897337..00000000 --- a/pyperformance/requirements.txt +++ /dev/null @@ -1,48 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile requirements.in -# -asgiref==3.3.4 - # via django -certifi==2020.12.5 - # via dulwich -chameleon==3.9.0 - # via -r requirements.in -django==3.2.5 - # via -r requirements.in -dulwich==0.20.21 - # via -r requirements.in -greenlet==1.1.0 - # via sqlalchemy -html5lib==1.1 - # via -r requirements.in -mako==1.1.4 - # via -r requirements.in -markupsafe==1.1.1 - # via mako -mpmath==1.2.1 - # via sympy -psutil==5.8.0 - # via -r requirements.in -pyaes==1.6.1 - # via -r requirements.in -pyperf==2.2.0 - # via -r requirements.in -pytz==2021.1 - # via django -six==1.16.0 - # via html5lib -sqlalchemy==1.4.15 - # via -r requirements.in -sqlparse==0.4.2 - # via django -sympy==1.8 - # via -r requirements.in -tornado==6.1 - # via -r requirements.in -urllib3==1.26.5 - # via dulwich -webencodings==0.5.1 - # via html5lib diff --git a/pyperformance/run.py b/pyperformance/run.py index 88007dc5..8e196547 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -1,140 +1,196 @@ -import logging -import os.path -import subprocess +from collections import namedtuple +import hashlib import sys +import time import traceback try: import multiprocessing except ImportError: multiprocessing = None -import pyperf - import pyperformance -from pyperformance.utils import temporary_file -from pyperformance.venv import PERFORMANCE_ROOT +from . import _utils, _pythoninfo +from . 
import venv as _venv class BenchmarkException(Exception): pass -# Utility functions - - -def Relative(*path): - return os.path.join(PERFORMANCE_ROOT, 'benchmarks', *path) - +class RunID(namedtuple('RunID', 'python compat bench timestamp')): -def run_command(command, hide_stderr=True): - if hide_stderr: - kw = {'stderr': subprocess.PIPE} - else: - kw = {} + def __new__(cls, python, compat, bench, timestamp): + self = super().__new__( + cls, + python, + compat, + bench or None, + int(timestamp) if timestamp else None, + ) + return self - logging.info("Running `%s`", - " ".join(list(map(str, command)))) + def __str__(self): + if not self.timestamp: + return self.name + return f'{self.name}-{self.timestamp}' - # Explicitly flush standard streams, required if streams are buffered - # (not TTY) to write lines in the expected order - sys.stdout.flush() - sys.stderr.flush() - - proc = subprocess.Popen(command, - universal_newlines=True, - **kw) - try: - stderr = proc.communicate()[1] - except: # noqa - if proc.stderr: - proc.stderr.close() + @property + def name(self): try: - proc.kill() - except OSError: - # process already exited - pass - proc.wait() - raise - - if proc.returncode != 0: - if hide_stderr: - sys.stderr.flush() - sys.stderr.write(stderr) - sys.stderr.flush() - raise RuntimeError("Benchmark died") - - -def copy_perf_options(cmd, options): - if options.debug_single_value: - cmd.append('--debug-single-value') - elif options.rigorous: - cmd.append('--rigorous') - elif options.fast: - cmd.append('--fast') - - if options.verbose: - cmd.append('--verbose') + return self._name + except AttributeError: + name = f'{self.python}-compat-{self.compat}' + if self.bench: + name = f'{name}-bm-{self.bench.name}' + self._name = name + return self._name - if options.affinity: - cmd.append('--affinity=%s' % options.affinity) - if options.track_memory: - cmd.append('--track-memory') - if options.inherit_environ: - cmd.append('--inherit-environ=%s' % ','.join(options.inherit_environ)) +def get_run_id(python, bench=None): + py_id = _pythoninfo.get_python_id(python, prefix=True) + compat_id = get_compatibility_id(bench) + ts = time.time() + return RunID(py_id, compat_id, bench, ts) -def run_perf_script(python, options, name, extra_args=[]): - bm_path = Relative("bm_%s.py" % name) - cmd = list(python) - cmd.append('-u') - cmd.append(bm_path) - cmd.extend(extra_args) - copy_perf_options(cmd, options) - with temporary_file() as tmp: - cmd.extend(('--output', tmp)) - run_command(cmd, hide_stderr=not options.verbose) - return pyperf.BenchmarkSuite.load(tmp) +def run_benchmarks(should_run, python, options): + to_run = sorted(should_run) + runid = get_run_id(python) + + benchmarks = {} + venvs = set() + if options.venv: + venv = _venv.VirtualEnvironment( + options.python, + options.venv, + inherit_environ=options.inherit_environ, + ) + venv.ensure(refresh=False) + venvs.add(venv.get_path()) + for i, bench in enumerate(to_run): + bench_runid = runid._replace(bench=bench) + assert bench_runid.name, (bench, bench_runid) + venv = _venv.VirtualEnvironment( + options.python, + options.venv, + inherit_environ=options.inherit_environ, + name=bench_runid.name, + usebase=True, + ) + print(f'({i+1:>2}/{len(to_run)}) creating venv for benchmark ({bench.name})') + venv_path = venv.get_path() + alreadyseen = venv_path in venvs + venv.ensure(refresh=not alreadyseen) + try: + # XXX Do not override when there is a requirements collision. 
+ venv.install_reqs(bench) + except _venv.RequirementsInstallationFailedError: + print('(benchmark will be skipped)') + print() + venv = None + venvs.add(venv_path) + benchmarks[bench] = (venv, bench_runid) -def run_benchmarks(bench_funcs, should_run, cmd_prefix, options): suite = None - to_run = sorted(should_run) run_count = str(len(to_run)) errors = [] - for index, name in enumerate(to_run): - func = bench_funcs[name] + pyperf_opts = get_pyperf_opts(options) + + import pyperf + for index, bench in enumerate(to_run): + name = bench.name print("[%s/%s] %s..." % (str(index + 1).rjust(len(run_count)), run_count, name)) sys.stdout.flush() def add_bench(dest_suite, obj): if isinstance(obj, pyperf.BenchmarkSuite): - benchmarks = obj + results = obj else: - benchmarks = (obj,) + results = (obj,) version = pyperformance.__version__ - for bench in benchmarks: - bench.update_metadata({'performance_version': version}) + for res in results: + res.update_metadata({'performance_version': version}) if dest_suite is not None: - dest_suite.add_benchmark(bench) + dest_suite.add_benchmark(res) else: - dest_suite = pyperf.BenchmarkSuite([bench]) + dest_suite = pyperf.BenchmarkSuite([res]) return dest_suite + bench_venv, bench_runid = benchmarks.get(bench) + if bench_venv is None: + print("ERROR: Benchmark %s failed: could not install requirements" % name) + errors.append(name) + continue try: - bench = func(cmd_prefix, options) + result = bench.run( + python, + bench_runid, + pyperf_opts, + venv=bench_venv, + verbose=options.verbose, + ) except Exception as exc: print("ERROR: Benchmark %s failed: %s" % (name, exc)) traceback.print_exc() errors.append(name) else: - suite = add_bench(suite, bench) + suite = add_bench(suite, result) print() return (suite, errors) + + +# Utility functions + +def get_compatibility_id(bench=None): + # XXX Do not include the pyperformance reqs if a benchmark was provided? + reqs = sorted(_utils.iter_clean_lines(_venv.REQUIREMENTS_FILE)) + if bench: + lockfile = bench.requirements_lockfile + if lockfile and os.path.exists(lockfile): + reqs += sorted(_utils.iter_clean_lines(lockfile)) + + data = [ + # XXX Favor pyperf.__version__ instead? + pyperformance.__version__, + '\n'.join(reqs), + ] + + h = hashlib.sha256() + for value in data: + h.update(value.encode('utf-8')) + compat_id = h.hexdigest() + # XXX Return the whole string? 
+ compat_id = compat_id[:12] + + return compat_id + + +def get_pyperf_opts(options): + opts = [] + + if options.debug_single_value: + opts.append('--debug-single-value') + elif options.rigorous: + opts.append('--rigorous') + elif options.fast: + opts.append('--fast') + + if options.verbose: + opts.append('--verbose') + + if options.affinity: + opts.append('--affinity=%s' % options.affinity) + if options.track_memory: + opts.append('--track-memory') + if options.inherit_environ: + opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ)) + + return opts diff --git a/pyperformance/utils.py b/pyperformance/utils.py deleted file mode 100644 index 0d6abdbc..00000000 --- a/pyperformance/utils.py +++ /dev/null @@ -1,21 +0,0 @@ -import contextlib -import errno -import os -import sys -import tempfile - - -MS_WINDOWS = (sys.platform == 'win32') - - -@contextlib.contextmanager -def temporary_file(): - tmp_filename = tempfile.mktemp() - try: - yield tmp_filename - finally: - try: - os.unlink(tmp_filename) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise diff --git a/pyperformance/venv.py b/pyperformance/venv.py index de6ad242..d65031e5 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -1,5 +1,6 @@ import errno import os +import os.path import shutil import subprocess import sys @@ -8,6 +9,7 @@ from shlex import quote as shell_quote import pyperformance +from . import _utils, _pythoninfo GET_PIP_URL = 'https://bootstrap.pypa.io/get-pip.py' @@ -15,6 +17,7 @@ REQ_OLD_SETUPTOOLS = 'setuptools==18.5' PERFORMANCE_ROOT = os.path.realpath(os.path.dirname(__file__)) +REQUIREMENTS_FILE = os.path.join(pyperformance.DATA_DIR, 'requirements.txt') def is_build_dir(): @@ -24,8 +27,27 @@ def is_build_dir(): return os.path.exists(os.path.join(root_dir, 'setup.py')) +class RequirementsInstallationFailedError(Exception): + pass + + class Requirements(object): - def __init__(self, filename, optional): + + @classmethod + def from_file(cls, filename, optional=None): + self = cls() + self._add_from_file(filename, optional) + return self + + @classmethod + def from_benchmarks(cls, benchmarks): + self = cls() + for bench in benchmarks or (): + filename = bench.requirements_lockfile + self._add_from_file(filename) + return self + + def __init__(self): # if pip or setuptools is updated: # .github/workflows/main.yml should be updated as well @@ -45,30 +67,48 @@ def __init__(self, filename, optional): ] # requirements - self.req = [] + self.specs = [] # optional requirements - self.optional = [] + self._optional = set() - with open(filename) as fp: - for line in fp.readlines(): - # strip comment - line = line.partition('#')[0] - line = line.rstrip() - if not line: - continue + def __len__(self): + return len(self.specs) - # strip env markers - req = line.partition(';')[0] + def iter_non_optional(self): + for spec in self.specs: + if spec in self._optional: + continue + yield spec - # strip version - req = req.partition('==')[0] - req = req.partition('>=')[0] + def iter_optional(self): + for spec in self.specs: + if spec not in self._optional: + continue + yield spec - if req in optional: - self.optional.append(line) - else: - self.req.append(line) + def _add_from_file(self, filename, optional=None): + if not os.path.exists(filename): + return + for line in _utils.iter_clean_lines(filename): + self._add(line, optional) + + def _add(self, line, optional=None): + self.specs.append(line) + if optional: + # strip env markers + req = line.partition(';')[0] + # strip version + req = 
req.partition('==')[0] + req = req.partition('>=')[0] + if req in optional: + self._optional.add(line) + + def get(self, name): + for req in self.specs: + if req.startswith(name): + return req + return None def safe_rmtree(path): @@ -136,12 +176,30 @@ def download(url, filename): class VirtualEnvironment(object): - def __init__(self, options): - self.options = options - self.python = options.python - self._venv_path = options.venv + + def __init__(self, python, root=None, *, + inherit_environ=None, + name=None, + usebase=False, + ): + if usebase: + python, _, _ = _pythoninfo.inspect_python_install(python) + + self.python = python + self.inherit_environ = inherit_environ or None + self._name = name or None + self._venv_path = root or None self._pip_program = None self._force_old_pip = False + self._prepared = False + + @property + def name(self): + if self._name is None: + from .run import get_run_id + runid = get_run_id(self.python) + self._name = runid.name + return self._name def get_python_program(self): venv_path = self.get_path() @@ -161,7 +219,7 @@ def run_cmd_nocheck(self, cmd, verbose=True): sys.stdout.flush() sys.stderr.flush() - env = create_environ(self.options.inherit_environ) + env = create_environ(self.inherit_environ) try: proc = subprocess.Popen(cmd, env=env) except OSError as exc: @@ -202,47 +260,11 @@ def get_output_nocheck(self, *cmd): return (exitcode, stdout) def get_path(self): - if self._venv_path is not None: - return self._venv_path - - script = textwrap.dedent(""" - import hashlib - import sys - - performance_version = sys.argv[1] - requirements = sys.argv[2] - - data = performance_version + sys.executable + sys.version - - pyver = sys.version_info - - implementation = sys.implementation.name.lower() - - if not isinstance(data, bytes): - data = data.encode('utf-8') - with open(requirements, 'rb') as fp: - data += fp.read() - sha1 = hashlib.sha1(data).hexdigest() - - name = ('%s%s.%s-%s' - % (implementation, pyver.major, pyver.minor, sha1[:12])) - print(name) - """) - - requirements = os.path.join(PERFORMANCE_ROOT, 'requirements.txt') - cmd = (self.python, '-c', script, - pyperformance.__version__, requirements) - proc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - universal_newlines=True) - stdout = proc.communicate()[0] - if proc.returncode: - print("ERROR: failed to create the name of the virtual environment") - sys.exit(1) - - venv_name = stdout.rstrip() - self._venv_path = venv_path = os.path.join('venv', venv_name) - return venv_path + if not self._venv_path: + self._venv_path = os.path.abspath( + os.path.join('venv', self.name), + ) + return self._venv_path def _get_pip_program(self): venv_path = self.get_path() @@ -383,49 +405,43 @@ def exists(self): venv_python = self.get_python_program() return os.path.exists(venv_python) - def _install_req(self): + def prepare(self, install=True): + venv_path = self.get_path() + print("Installing the virtual environment %s" % venv_path) + if self._prepared or (self._prepared is None and not install): + print('(already installed)') + return pip_program = self.get_pip_program() - # parse requirements - filename = os.path.join(PERFORMANCE_ROOT, 'requirements.txt') - requirements = Requirements(filename, - # FIXME: don't hardcode requirements - ['psutil', 'dulwich']) - - # Upgrade pip - cmd = pip_program + ['install', '-U'] - if self._force_old_pip: - cmd.extend((REQ_OLD_PIP, REQ_OLD_SETUPTOOLS)) + if not self._prepared: + # parse requirements + basereqs = Requirements.from_file(REQUIREMENTS_FILE, ['psutil']) + + # 
Upgrade pip + cmd = pip_program + ['install', '-U'] + if self._force_old_pip: + cmd.extend((REQ_OLD_PIP, REQ_OLD_SETUPTOOLS)) + else: + cmd.extend(basereqs.pip) + self.run_cmd(cmd) + + # Upgrade installer dependencies (setuptools, ...) + cmd = pip_program + ['install', '-U'] + cmd.extend(basereqs.installer) + self.run_cmd(cmd) + + if install: + # install pyperformance inside the virtual environment + if is_build_dir(): + root_dir = os.path.dirname(PERFORMANCE_ROOT) + cmd = pip_program + ['install', '-e', root_dir] + else: + version = pyperformance.__version__ + cmd = pip_program + ['install', 'pyperformance==%s' % version] + self.run_cmd(cmd) + self._prepared = True else: - cmd.extend(requirements.pip) - self.run_cmd(cmd) - - # Upgrade installer dependencies (setuptools, ...) - cmd = pip_program + ['install', '-U'] - cmd.extend(requirements.installer) - self.run_cmd(cmd) - - # install requirements - cmd = pip_program + ['install'] - cmd.extend(requirements.req) - self.run_cmd(cmd) - - # install optional requirements - for req in requirements.optional: - cmd = pip_program + ['install', '-U', req] - exitcode = self.run_cmd_nocheck(cmd) - if exitcode: - print("WARNING: failed to install %s" % req) - print() - - # install pyperformance inside the virtual environment - if is_build_dir(): - root_dir = os.path.dirname(PERFORMANCE_ROOT) - cmd = pip_program + ['install', '-e', root_dir] - else: - version = pyperformance.__version__ - cmd = pip_program + ['install', 'pyperformance==%s' % version] - self.run_cmd(cmd) + self._prepared = None # Display the pip version cmd = pip_program + ['--version'] @@ -435,26 +451,87 @@ def _install_req(self): cmd = pip_program + ['freeze'] self.run_cmd(cmd) - def create(self): - if self.exists(): - return - + def create(self, install=True): venv_path = self.get_path() - print("Creating the virtual environment %s" % venv_path) + if self.exists(): + raise Exception(f'virtual environment {venv_path} already exists') try: self._create_venv() - self._install_req() + self.prepare(install) except: # noqa print() safe_rmtree(venv_path) raise + def ensure(self, refresh=True, install=True): + venv_path = self.get_path() + if self.exists(): + if refresh: + self.prepare(install) + else: + self.create(install) + + def install_reqs(self, requirements=None, *, exitonerror=False): + venv_path = self.get_path() + print("Installing requirements into the virtual environment %s" % venv_path) + + # parse requirements + bench = None + if requirements is None: + requirements = Requirements() + elif hasattr(requirements, 'requirements_lockfile'): + bench = requirements + requirements = Requirements.from_benchmarks([bench]) + + # Every benchmark must depend on pyperf. 
+ if requirements and bench is not None: + if not requirements.get('pyperf'): + basereqs = Requirements.from_file(REQUIREMENTS_FILE, ['psutil']) + pyperf_req = basereqs.get('pyperf') + if not pyperf_req: + raise NotImplementedError + requirements.specs.append(pyperf_req) + + pip_program = self.get_pip_program() + if not requirements: + print('(nothing to install)') + else: + self.prepare(install=bench is None) + + # install requirements + cmd = pip_program + ['install'] + reqs = list(requirements.iter_non_optional()) + cmd.extend(reqs) + exitcode = self.run_cmd_nocheck(cmd) + if exitcode: + if exitonerror: + sys.exit(exitcode) + raise RequirementsInstallationFailedError(reqs) + + # install optional requirements + for req in requirements.iter_optional(): + cmd = pip_program + ['install', '-U', req] + exitcode = self.run_cmd_nocheck(cmd) + if exitcode: + print("WARNING: failed to install %s" % req) + print() + + # Dump the package list and their versions: pip freeze + cmd = pip_program + ['freeze'] + self.run_cmd(cmd) + + return requirements + def exec_in_virtualenv(options): - venv = VirtualEnvironment(options) + venv = VirtualEnvironment( + options.python, + options.venv, + inherit_environ=options.inherit_environ, + ) - venv.create() + venv.ensure() venv_python = venv.get_python_program() args = [venv_python, "-m", "pyperformance"] + \ @@ -470,27 +547,46 @@ def exec_in_virtualenv(options): os.execv(args[0], args) -def cmd_venv(options): +def cmd_venv(options, benchmarks=None): action = options.venv_action - venv = VirtualEnvironment(options) - venv_path = venv.get_path() + requirements = Requirements.from_benchmarks(benchmarks) - if action in ('create', 'recreate'): - recreated = False - if action == 'recreate' and venv.exists(): - recreated = True - shutil.rmtree(venv_path) - print("The old virtual environment %s has been removed" % venv_path) - print() - - if not venv.exists(): - venv.create() + venv = VirtualEnvironment( + options.python, + options.venv, + inherit_environ=options.inherit_environ, + ) + venv_path = venv.get_path() + exists = venv.exists() - what = 'recreated' if recreated else 'created' - print("The virtual environment %s has been %s" % (venv_path, what)) - else: + if action == 'create': + if exists: print("The virtual environment %s already exists" % venv_path) + venv.ensure() + venv.install_reqs(requirements, exitonerror=True) + if not exists: + print("The virtual environment %s has been created" % venv_path) + + elif action == 'recreate': + if exists: + if venv.get_python_program() == sys.executable: + print("The virtual environment %s already exists" % venv_path) + print("(it matches the currently running Python executable)") + venv.ensure() + venv.install_reqs(requirements, exitonerror=True) + else: + print("The virtual environment %s already exists" % venv_path) + shutil.rmtree(venv_path) + print("The old virtual environment %s has been removed" % venv_path) + print() + venv.ensure() + venv.install_reqs(requirements, exitonerror=True) + print("The virtual environment %s has been recreated" % venv_path) + else: + venv.create() + venv.install_reqs(requirements, exitonerror=True) + print("The virtual environment %s has been created" % venv_path) elif action == 'remove': if os.path.exists(venv_path): diff --git a/requirements.in b/requirements.in new file mode 100644 index 00000000..0d76bbd8 --- /dev/null +++ b/requirements.in @@ -0,0 +1,25 @@ +# When one of these dependencies is upgraded, the pyperformance major version +# should be increased to respect semantic 
versionning. Comparison between +# two pyperformance results of two different major versions is not reliable. +# +# To rebuild requirements.txt: +# +# pip-compile --upgrade -o requirements.txt requirements.in + +# pyperformance dependencies +# -------------------------- + +pyperf + +# for benchmark metadata: +packaging +toml + + +# Optional dependencies +# --------------------- +# +# The list of optional dependencies is hardcoded in pyperformance/venv.py + +# XXX Do we still need this? +psutil diff --git a/requirements.txt b/requirements.txt new file mode 120000 index 00000000..4ce56a00 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +pyperformance/data-files/requirements.txt \ No newline at end of file diff --git a/runtests.py b/runtests.py index 34ac702a..5c044e62 100755 --- a/runtests.py +++ b/runtests.py @@ -7,7 +7,7 @@ def run_cmd(cmd): - print("Execute: %s" % ' '.join(cmd)) + print("(runtests.py) Execute: %s" % ' '.join(cmd), flush=True) proc = subprocess.Popen(cmd) try: proc.wait() @@ -15,10 +15,11 @@ def run_cmd(cmd): proc.kill() proc.wait() raise + sys.stdout.flush() exitcode = proc.returncode if exitcode: sys.exit(exitcode) - print("") + print("", flush=True) def run_tests(venv): @@ -39,16 +40,16 @@ def run_bench(*cmd): cmd = cmd + ('--venv', venv) run_cmd(cmd) - run_bench(python, script, 'venv', 'create') + run_bench(python, '-u', script, 'venv', 'create', '-b', 'all') egg_info = "pyperformance.egg-info" - print("Remove directory %s" % egg_info) + print("(runtests.py) Remove directory %s" % egg_info, flush=True) try: shutil.rmtree(egg_info) except FileNotFoundError: pass - run_bench(python, script, 'venv') + run_bench(python, '-u', script, 'venv', 'create') for filename in ( os.path.join('pyperformance', 'tests', 'data', 'py36.json'), @@ -56,8 +57,8 @@ def run_bench(*cmd): ): run_cmd((python, script, 'show', filename)) - run_bench(python, script, 'list') - run_bench(python, script, 'list_groups') + run_bench(python, '-u', script, 'list') + run_bench(python, '-u', script, 'list_groups') json = os.path.join(venv, 'bench.json') @@ -65,18 +66,18 @@ def run_bench(*cmd): # # --debug-single-value: benchmark results don't matter, we only # check that running benchmarks don't fail. - run_bench(python, script, 'run', '-b', 'all', '--debug-single-value', + run_bench(python, '-u', script, 'run', '-b', 'all', '--debug-single-value', '-o', json) # Display slowest benchmarks - run_cmd((venv_python, '-m', 'pyperf', 'slowest', json)) + run_cmd((venv_python, '-u', '-m', 'pyperf', 'slowest', json)) - run_bench(python, script, 'venv', 'remove') + run_bench(python, '-u', script, 'venv', 'remove') def main(): # Unit tests - cmd = [sys.executable, + cmd = [sys.executable, '-u', os.path.join('pyperformance', 'tests', 'test_compare.py')] run_cmd(cmd) diff --git a/setup.py b/setup.py index ee17a62d..5570e27e 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # # - python3 -m pip install --user --upgrade pip-tools # - git clean -fdx # remove all untracked files! 
-# - (cd pyperformance; pip-compile --upgrade requirements.in) +# - (pip-compile --upgrade -o requirements.txt requirements.in) # # Prepare a release: # @@ -59,39 +59,11 @@ def main(): import io import os.path - from setuptools import setup + from setuptools import setup, find_packages with io.open('README.rst', encoding="utf8") as fp: long_description = fp.read().strip() - packages = [ - 'pyperformance', - 'pyperformance.benchmarks', - 'pyperformance.benchmarks.data', - 'pyperformance.benchmarks.data.2to3', - 'pyperformance.tests', - 'pyperformance.tests.data', - ] - - data = { - 'pyperformance': ['requirements.txt'], - 'pyperformance.tests': ['data/*.json'], - } - - # Search for all files in pyperformance/benchmarks/data/ - data_dir = os.path.join('pyperformance', 'benchmarks', 'data') - benchmarks_data = [] - for root, dirnames, filenames in os.walk(data_dir): - # Strip pyperformance/benchmarks/ prefix - root = os.path.normpath(root) - root = root.split(os.path.sep) - root = os.path.sep.join(root[2:]) - - for filename in filenames: - filename = os.path.join(root, filename) - benchmarks_data.append(filename) - data['pyperformance.benchmarks'] = benchmarks_data - options = { 'name': 'pyperformance', 'version': VERSION, @@ -101,12 +73,12 @@ def main(): 'long_description': long_description, 'url': 'https://github.com/python/benchmarks', 'classifiers': CLASSIFIERS, - 'packages': packages, - 'package_data': data, + 'packages': find_packages(), + 'include_package_data': True, 'entry_points': { 'console_scripts': ['pyperformance=pyperformance.cli:main'] }, - 'install_requires': ["pyperf"], + 'install_requires': ["pyperf", "toml", "packaging"], } setup(**options)
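---

Reviewer aid (not part of the patch): a minimal sketch of how the reworked virtual-environment machinery introduced above in `pyperformance/venv.py` and `pyperformance/run.py` is meant to be driven. It mirrors what `run_benchmarks()` and `cmd_venv()` do in the diff; the interpreter and venv root used here are placeholder examples, and no benchmark object is passed, so there are no per-benchmark requirements to install.

```python
# Sketch only: exercises the VirtualEnvironment API added in pyperformance/venv.py.
# sys.executable and 'venv/example' are stand-ins; real callers pass
# options.python and options.venv.
import sys

from pyperformance.venv import (
    VirtualEnvironment,
    RequirementsInstallationFailedError,
)

venv = VirtualEnvironment(
    sys.executable,      # interpreter to benchmark
    'venv/example',      # venv root; if omitted, a run-id-derived name under ./venv/ is used
    inherit_environ=None,
)

# Create the venv if it is missing; if it already exists, refresh it
# (re-run prepare(), which upgrades pip/setuptools and installs pyperformance).
venv.ensure(refresh=True)

try:
    # With no benchmark given there are no extra requirements to add;
    # run_benchmarks() passes the benchmark object here instead.
    venv.install_reqs()
except RequirementsInstallationFailedError:
    # run_benchmarks() treats this as "benchmark will be skipped".
    print("requirements installation failed")

print("venv python:", venv.get_python_program())
```

Per-benchmark venvs in `run_benchmarks()` follow the same pattern, except that the venv name comes from a `RunID` (interpreter id, compatibility id, benchmark name) and `install_reqs(bench)` is given the benchmark so its lock file from the new `bm_NAME/requirements.txt` layout is installed.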