From 509d93cdffe0888bc213d29ae7a1daa39f229773 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 18 Jun 2021 16:29:38 -0600 Subject: [PATCH 001/126] benchmarks -> _benchmarks --- .../{benchmarks => _benchmarks}/__init__.py | 0 .../{benchmarks => _benchmarks}/bm_2to3.py | 0 .../{benchmarks => _benchmarks}/bm_chameleon.py | 0 .../{benchmarks => _benchmarks}/bm_chaos.py | 0 .../{benchmarks => _benchmarks}/bm_crypto_pyaes.py | 0 .../{benchmarks => _benchmarks}/bm_deltablue.py | 0 .../bm_django_template.py | 0 .../{benchmarks => _benchmarks}/bm_dulwich_log.py | 0 .../{benchmarks => _benchmarks}/bm_fannkuch.py | 0 .../{benchmarks => _benchmarks}/bm_float.py | 0 .../{benchmarks => _benchmarks}/bm_genshi.py | 0 pyperformance/{benchmarks => _benchmarks}/bm_go.py | 0 .../{benchmarks => _benchmarks}/bm_hexiom.py | 0 .../{benchmarks => _benchmarks}/bm_hg_startup.py | 0 .../{benchmarks => _benchmarks}/bm_html5lib.py | 0 .../{benchmarks => _benchmarks}/bm_json_dumps.py | 0 .../{benchmarks => _benchmarks}/bm_json_loads.py | 0 .../{benchmarks => _benchmarks}/bm_logging.py | 0 .../{benchmarks => _benchmarks}/bm_mako.py | 0 pyperformance/{benchmarks => _benchmarks}/bm_mdp.py | 0 .../bm_meteor_contest.py | 0 .../{benchmarks => _benchmarks}/bm_nbody.py | 0 .../{benchmarks => _benchmarks}/bm_nqueens.py | 0 .../{benchmarks => _benchmarks}/bm_pathlib.py | 0 .../{benchmarks => _benchmarks}/bm_pickle.py | 0 .../{benchmarks => _benchmarks}/bm_pidigits.py | 0 .../{benchmarks => _benchmarks}/bm_pyflate.py | 0 .../bm_python_startup.py | 0 .../{benchmarks => _benchmarks}/bm_raytrace.py | 0 .../{benchmarks => _benchmarks}/bm_regex_compile.py | 0 .../{benchmarks => _benchmarks}/bm_regex_dna.py | 0 .../{benchmarks => _benchmarks}/bm_regex_effbot.py | 0 .../{benchmarks => _benchmarks}/bm_regex_v8.py | 0 .../{benchmarks => _benchmarks}/bm_richards.py | 0 .../{benchmarks => _benchmarks}/bm_scimark.py | 0 .../{benchmarks => _benchmarks}/bm_spectral_norm.py | 0 .../bm_sqlalchemy_declarative.py | 0 .../bm_sqlalchemy_imperative.py | 0 .../{benchmarks => _benchmarks}/bm_sqlite_synth.py | 0 .../{benchmarks => _benchmarks}/bm_sympy.py | 0 .../{benchmarks => _benchmarks}/bm_telco.py | 0 .../{benchmarks => _benchmarks}/bm_tornado_http.py | 0 .../bm_unpack_sequence.py | 0 .../{benchmarks => _benchmarks}/bm_xml_etree.py | 0 .../data/2to3/README.txt | 0 .../data/2to3/__init__.py.txt | 0 .../data/2to3/context_processors.py.txt | 0 .../data/2to3/exceptions.py.txt | 0 .../data/2to3/mail.py.txt | 0 .../data/2to3/paginator.py.txt | 0 .../data/2to3/signals.py.txt | 0 .../data/2to3/template_loader.py.txt | 0 .../data/2to3/urlresolvers.py.txt | 0 .../data/2to3/xheaders.py.txt | 0 .../data/asyncio.git/COMMIT_EDITMSG | 0 .../data/asyncio.git/FETCH_HEAD | 0 .../data/asyncio.git/HEAD | 0 .../data/asyncio.git/ORIG_HEAD | 0 .../data/asyncio.git/config | 0 .../data/asyncio.git/description | 0 .../data/asyncio.git/hooks/applypatch-msg.sample | 0 .../data/asyncio.git/hooks/commit-msg.sample | 0 .../data/asyncio.git/hooks/post-update.sample | 0 .../data/asyncio.git/hooks/pre-applypatch.sample | 0 .../data/asyncio.git/hooks/pre-commit.sample | 0 .../data/asyncio.git/hooks/pre-push.sample | 0 .../data/asyncio.git/hooks/pre-rebase.sample | 0 .../asyncio.git/hooks/prepare-commit-msg.sample | 0 .../data/asyncio.git/hooks/update.sample | 0 .../data/asyncio.git/index | Bin .../data/asyncio.git/info/exclude | 0 .../data/asyncio.git/info/refs | 0 .../data/asyncio.git/logs/HEAD | 0 .../data/asyncio.git/logs/refs/heads/master | 0 
.../data/asyncio.git/logs/refs/remotes/origin/HEAD | 0 .../logs/refs/remotes/origin/bind_modules | 0 .../asyncio.git/logs/refs/remotes/origin/master | 0 .../logs/refs/remotes/origin/zero_timeout | 0 .../data/asyncio.git/objects/info/packs | 0 ...ack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx | Bin ...ck-7e1b1ace85030071ca314cd565ae038bacc302a4.pack | Bin .../data/asyncio.git/packed-refs | 0 .../data/asyncio.git/refs/remotes/origin/HEAD | 0 .../data/interpreter.tar.bz2 | Bin .../{benchmarks => _benchmarks}/data/telco-bench.b | Bin .../data/w3_tr_html5.html | 0 pyperformance/cli_run.py | 11 ++++++----- pyperformance/run.py | 2 +- setup.py | 8 ++++---- 89 files changed, 11 insertions(+), 10 deletions(-) rename pyperformance/{benchmarks => _benchmarks}/__init__.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_2to3.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_chameleon.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_chaos.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_crypto_pyaes.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_deltablue.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_django_template.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_dulwich_log.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_fannkuch.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_float.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_genshi.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_go.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_hexiom.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_hg_startup.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_html5lib.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_json_dumps.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_json_loads.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_logging.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_mako.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_mdp.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_meteor_contest.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_nbody.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_nqueens.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_pathlib.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_pickle.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_pidigits.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_pyflate.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_python_startup.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_raytrace.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_regex_compile.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_regex_dna.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_regex_effbot.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_regex_v8.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_richards.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_scimark.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_spectral_norm.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_sqlalchemy_declarative.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_sqlalchemy_imperative.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_sqlite_synth.py (100%) rename pyperformance/{benchmarks => 
_benchmarks}/bm_sympy.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_telco.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_tornado_http.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_unpack_sequence.py (100%) rename pyperformance/{benchmarks => _benchmarks}/bm_xml_etree.py (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/README.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/__init__.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/context_processors.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/exceptions.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/mail.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/paginator.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/signals.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/template_loader.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/urlresolvers.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/2to3/xheaders.py.txt (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/COMMIT_EDITMSG (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/FETCH_HEAD (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/HEAD (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/ORIG_HEAD (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/config (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/description (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/applypatch-msg.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/commit-msg.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/post-update.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/pre-applypatch.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/pre-commit.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/pre-push.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/pre-rebase.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/prepare-commit-msg.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/hooks/update.sample (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/index (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/info/exclude (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/info/refs (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/logs/HEAD (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/logs/refs/heads/master (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/logs/refs/remotes/origin/HEAD (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/logs/refs/remotes/origin/bind_modules (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/logs/refs/remotes/origin/master (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/logs/refs/remotes/origin/zero_timeout (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/objects/info/packs (100%) rename pyperformance/{benchmarks => 
_benchmarks}/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/packed-refs (100%) rename pyperformance/{benchmarks => _benchmarks}/data/asyncio.git/refs/remotes/origin/HEAD (100%) rename pyperformance/{benchmarks => _benchmarks}/data/interpreter.tar.bz2 (100%) rename pyperformance/{benchmarks => _benchmarks}/data/telco-bench.b (100%) rename pyperformance/{benchmarks => _benchmarks}/data/w3_tr_html5.html (100%) diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/_benchmarks/__init__.py similarity index 100% rename from pyperformance/benchmarks/__init__.py rename to pyperformance/_benchmarks/__init__.py diff --git a/pyperformance/benchmarks/bm_2to3.py b/pyperformance/_benchmarks/bm_2to3.py similarity index 100% rename from pyperformance/benchmarks/bm_2to3.py rename to pyperformance/_benchmarks/bm_2to3.py diff --git a/pyperformance/benchmarks/bm_chameleon.py b/pyperformance/_benchmarks/bm_chameleon.py similarity index 100% rename from pyperformance/benchmarks/bm_chameleon.py rename to pyperformance/_benchmarks/bm_chameleon.py diff --git a/pyperformance/benchmarks/bm_chaos.py b/pyperformance/_benchmarks/bm_chaos.py similarity index 100% rename from pyperformance/benchmarks/bm_chaos.py rename to pyperformance/_benchmarks/bm_chaos.py diff --git a/pyperformance/benchmarks/bm_crypto_pyaes.py b/pyperformance/_benchmarks/bm_crypto_pyaes.py similarity index 100% rename from pyperformance/benchmarks/bm_crypto_pyaes.py rename to pyperformance/_benchmarks/bm_crypto_pyaes.py diff --git a/pyperformance/benchmarks/bm_deltablue.py b/pyperformance/_benchmarks/bm_deltablue.py similarity index 100% rename from pyperformance/benchmarks/bm_deltablue.py rename to pyperformance/_benchmarks/bm_deltablue.py diff --git a/pyperformance/benchmarks/bm_django_template.py b/pyperformance/_benchmarks/bm_django_template.py similarity index 100% rename from pyperformance/benchmarks/bm_django_template.py rename to pyperformance/_benchmarks/bm_django_template.py diff --git a/pyperformance/benchmarks/bm_dulwich_log.py b/pyperformance/_benchmarks/bm_dulwich_log.py similarity index 100% rename from pyperformance/benchmarks/bm_dulwich_log.py rename to pyperformance/_benchmarks/bm_dulwich_log.py diff --git a/pyperformance/benchmarks/bm_fannkuch.py b/pyperformance/_benchmarks/bm_fannkuch.py similarity index 100% rename from pyperformance/benchmarks/bm_fannkuch.py rename to pyperformance/_benchmarks/bm_fannkuch.py diff --git a/pyperformance/benchmarks/bm_float.py b/pyperformance/_benchmarks/bm_float.py similarity index 100% rename from pyperformance/benchmarks/bm_float.py rename to pyperformance/_benchmarks/bm_float.py diff --git a/pyperformance/benchmarks/bm_genshi.py b/pyperformance/_benchmarks/bm_genshi.py similarity index 100% rename from pyperformance/benchmarks/bm_genshi.py rename to pyperformance/_benchmarks/bm_genshi.py diff --git a/pyperformance/benchmarks/bm_go.py b/pyperformance/_benchmarks/bm_go.py similarity index 100% rename from pyperformance/benchmarks/bm_go.py rename to pyperformance/_benchmarks/bm_go.py diff --git a/pyperformance/benchmarks/bm_hexiom.py b/pyperformance/_benchmarks/bm_hexiom.py similarity index 100% rename from pyperformance/benchmarks/bm_hexiom.py rename to pyperformance/_benchmarks/bm_hexiom.py diff --git a/pyperformance/benchmarks/bm_hg_startup.py 
b/pyperformance/_benchmarks/bm_hg_startup.py similarity index 100% rename from pyperformance/benchmarks/bm_hg_startup.py rename to pyperformance/_benchmarks/bm_hg_startup.py diff --git a/pyperformance/benchmarks/bm_html5lib.py b/pyperformance/_benchmarks/bm_html5lib.py similarity index 100% rename from pyperformance/benchmarks/bm_html5lib.py rename to pyperformance/_benchmarks/bm_html5lib.py diff --git a/pyperformance/benchmarks/bm_json_dumps.py b/pyperformance/_benchmarks/bm_json_dumps.py similarity index 100% rename from pyperformance/benchmarks/bm_json_dumps.py rename to pyperformance/_benchmarks/bm_json_dumps.py diff --git a/pyperformance/benchmarks/bm_json_loads.py b/pyperformance/_benchmarks/bm_json_loads.py similarity index 100% rename from pyperformance/benchmarks/bm_json_loads.py rename to pyperformance/_benchmarks/bm_json_loads.py diff --git a/pyperformance/benchmarks/bm_logging.py b/pyperformance/_benchmarks/bm_logging.py similarity index 100% rename from pyperformance/benchmarks/bm_logging.py rename to pyperformance/_benchmarks/bm_logging.py diff --git a/pyperformance/benchmarks/bm_mako.py b/pyperformance/_benchmarks/bm_mako.py similarity index 100% rename from pyperformance/benchmarks/bm_mako.py rename to pyperformance/_benchmarks/bm_mako.py diff --git a/pyperformance/benchmarks/bm_mdp.py b/pyperformance/_benchmarks/bm_mdp.py similarity index 100% rename from pyperformance/benchmarks/bm_mdp.py rename to pyperformance/_benchmarks/bm_mdp.py diff --git a/pyperformance/benchmarks/bm_meteor_contest.py b/pyperformance/_benchmarks/bm_meteor_contest.py similarity index 100% rename from pyperformance/benchmarks/bm_meteor_contest.py rename to pyperformance/_benchmarks/bm_meteor_contest.py diff --git a/pyperformance/benchmarks/bm_nbody.py b/pyperformance/_benchmarks/bm_nbody.py similarity index 100% rename from pyperformance/benchmarks/bm_nbody.py rename to pyperformance/_benchmarks/bm_nbody.py diff --git a/pyperformance/benchmarks/bm_nqueens.py b/pyperformance/_benchmarks/bm_nqueens.py similarity index 100% rename from pyperformance/benchmarks/bm_nqueens.py rename to pyperformance/_benchmarks/bm_nqueens.py diff --git a/pyperformance/benchmarks/bm_pathlib.py b/pyperformance/_benchmarks/bm_pathlib.py similarity index 100% rename from pyperformance/benchmarks/bm_pathlib.py rename to pyperformance/_benchmarks/bm_pathlib.py diff --git a/pyperformance/benchmarks/bm_pickle.py b/pyperformance/_benchmarks/bm_pickle.py similarity index 100% rename from pyperformance/benchmarks/bm_pickle.py rename to pyperformance/_benchmarks/bm_pickle.py diff --git a/pyperformance/benchmarks/bm_pidigits.py b/pyperformance/_benchmarks/bm_pidigits.py similarity index 100% rename from pyperformance/benchmarks/bm_pidigits.py rename to pyperformance/_benchmarks/bm_pidigits.py diff --git a/pyperformance/benchmarks/bm_pyflate.py b/pyperformance/_benchmarks/bm_pyflate.py similarity index 100% rename from pyperformance/benchmarks/bm_pyflate.py rename to pyperformance/_benchmarks/bm_pyflate.py diff --git a/pyperformance/benchmarks/bm_python_startup.py b/pyperformance/_benchmarks/bm_python_startup.py similarity index 100% rename from pyperformance/benchmarks/bm_python_startup.py rename to pyperformance/_benchmarks/bm_python_startup.py diff --git a/pyperformance/benchmarks/bm_raytrace.py b/pyperformance/_benchmarks/bm_raytrace.py similarity index 100% rename from pyperformance/benchmarks/bm_raytrace.py rename to pyperformance/_benchmarks/bm_raytrace.py diff --git a/pyperformance/benchmarks/bm_regex_compile.py 
b/pyperformance/_benchmarks/bm_regex_compile.py similarity index 100% rename from pyperformance/benchmarks/bm_regex_compile.py rename to pyperformance/_benchmarks/bm_regex_compile.py diff --git a/pyperformance/benchmarks/bm_regex_dna.py b/pyperformance/_benchmarks/bm_regex_dna.py similarity index 100% rename from pyperformance/benchmarks/bm_regex_dna.py rename to pyperformance/_benchmarks/bm_regex_dna.py diff --git a/pyperformance/benchmarks/bm_regex_effbot.py b/pyperformance/_benchmarks/bm_regex_effbot.py similarity index 100% rename from pyperformance/benchmarks/bm_regex_effbot.py rename to pyperformance/_benchmarks/bm_regex_effbot.py diff --git a/pyperformance/benchmarks/bm_regex_v8.py b/pyperformance/_benchmarks/bm_regex_v8.py similarity index 100% rename from pyperformance/benchmarks/bm_regex_v8.py rename to pyperformance/_benchmarks/bm_regex_v8.py diff --git a/pyperformance/benchmarks/bm_richards.py b/pyperformance/_benchmarks/bm_richards.py similarity index 100% rename from pyperformance/benchmarks/bm_richards.py rename to pyperformance/_benchmarks/bm_richards.py diff --git a/pyperformance/benchmarks/bm_scimark.py b/pyperformance/_benchmarks/bm_scimark.py similarity index 100% rename from pyperformance/benchmarks/bm_scimark.py rename to pyperformance/_benchmarks/bm_scimark.py diff --git a/pyperformance/benchmarks/bm_spectral_norm.py b/pyperformance/_benchmarks/bm_spectral_norm.py similarity index 100% rename from pyperformance/benchmarks/bm_spectral_norm.py rename to pyperformance/_benchmarks/bm_spectral_norm.py diff --git a/pyperformance/benchmarks/bm_sqlalchemy_declarative.py b/pyperformance/_benchmarks/bm_sqlalchemy_declarative.py similarity index 100% rename from pyperformance/benchmarks/bm_sqlalchemy_declarative.py rename to pyperformance/_benchmarks/bm_sqlalchemy_declarative.py diff --git a/pyperformance/benchmarks/bm_sqlalchemy_imperative.py b/pyperformance/_benchmarks/bm_sqlalchemy_imperative.py similarity index 100% rename from pyperformance/benchmarks/bm_sqlalchemy_imperative.py rename to pyperformance/_benchmarks/bm_sqlalchemy_imperative.py diff --git a/pyperformance/benchmarks/bm_sqlite_synth.py b/pyperformance/_benchmarks/bm_sqlite_synth.py similarity index 100% rename from pyperformance/benchmarks/bm_sqlite_synth.py rename to pyperformance/_benchmarks/bm_sqlite_synth.py diff --git a/pyperformance/benchmarks/bm_sympy.py b/pyperformance/_benchmarks/bm_sympy.py similarity index 100% rename from pyperformance/benchmarks/bm_sympy.py rename to pyperformance/_benchmarks/bm_sympy.py diff --git a/pyperformance/benchmarks/bm_telco.py b/pyperformance/_benchmarks/bm_telco.py similarity index 100% rename from pyperformance/benchmarks/bm_telco.py rename to pyperformance/_benchmarks/bm_telco.py diff --git a/pyperformance/benchmarks/bm_tornado_http.py b/pyperformance/_benchmarks/bm_tornado_http.py similarity index 100% rename from pyperformance/benchmarks/bm_tornado_http.py rename to pyperformance/_benchmarks/bm_tornado_http.py diff --git a/pyperformance/benchmarks/bm_unpack_sequence.py b/pyperformance/_benchmarks/bm_unpack_sequence.py similarity index 100% rename from pyperformance/benchmarks/bm_unpack_sequence.py rename to pyperformance/_benchmarks/bm_unpack_sequence.py diff --git a/pyperformance/benchmarks/bm_xml_etree.py b/pyperformance/_benchmarks/bm_xml_etree.py similarity index 100% rename from pyperformance/benchmarks/bm_xml_etree.py rename to pyperformance/_benchmarks/bm_xml_etree.py diff --git a/pyperformance/benchmarks/data/2to3/README.txt 
b/pyperformance/_benchmarks/data/2to3/README.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/README.txt rename to pyperformance/_benchmarks/data/2to3/README.txt diff --git a/pyperformance/benchmarks/data/2to3/__init__.py.txt b/pyperformance/_benchmarks/data/2to3/__init__.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/__init__.py.txt rename to pyperformance/_benchmarks/data/2to3/__init__.py.txt diff --git a/pyperformance/benchmarks/data/2to3/context_processors.py.txt b/pyperformance/_benchmarks/data/2to3/context_processors.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/context_processors.py.txt rename to pyperformance/_benchmarks/data/2to3/context_processors.py.txt diff --git a/pyperformance/benchmarks/data/2to3/exceptions.py.txt b/pyperformance/_benchmarks/data/2to3/exceptions.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/exceptions.py.txt rename to pyperformance/_benchmarks/data/2to3/exceptions.py.txt diff --git a/pyperformance/benchmarks/data/2to3/mail.py.txt b/pyperformance/_benchmarks/data/2to3/mail.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/mail.py.txt rename to pyperformance/_benchmarks/data/2to3/mail.py.txt diff --git a/pyperformance/benchmarks/data/2to3/paginator.py.txt b/pyperformance/_benchmarks/data/2to3/paginator.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/paginator.py.txt rename to pyperformance/_benchmarks/data/2to3/paginator.py.txt diff --git a/pyperformance/benchmarks/data/2to3/signals.py.txt b/pyperformance/_benchmarks/data/2to3/signals.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/signals.py.txt rename to pyperformance/_benchmarks/data/2to3/signals.py.txt diff --git a/pyperformance/benchmarks/data/2to3/template_loader.py.txt b/pyperformance/_benchmarks/data/2to3/template_loader.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/template_loader.py.txt rename to pyperformance/_benchmarks/data/2to3/template_loader.py.txt diff --git a/pyperformance/benchmarks/data/2to3/urlresolvers.py.txt b/pyperformance/_benchmarks/data/2to3/urlresolvers.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/urlresolvers.py.txt rename to pyperformance/_benchmarks/data/2to3/urlresolvers.py.txt diff --git a/pyperformance/benchmarks/data/2to3/xheaders.py.txt b/pyperformance/_benchmarks/data/2to3/xheaders.py.txt similarity index 100% rename from pyperformance/benchmarks/data/2to3/xheaders.py.txt rename to pyperformance/_benchmarks/data/2to3/xheaders.py.txt diff --git a/pyperformance/benchmarks/data/asyncio.git/COMMIT_EDITMSG b/pyperformance/_benchmarks/data/asyncio.git/COMMIT_EDITMSG similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/COMMIT_EDITMSG rename to pyperformance/_benchmarks/data/asyncio.git/COMMIT_EDITMSG diff --git a/pyperformance/benchmarks/data/asyncio.git/FETCH_HEAD b/pyperformance/_benchmarks/data/asyncio.git/FETCH_HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/FETCH_HEAD rename to pyperformance/_benchmarks/data/asyncio.git/FETCH_HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/HEAD b/pyperformance/_benchmarks/data/asyncio.git/HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/HEAD rename to pyperformance/_benchmarks/data/asyncio.git/HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/ORIG_HEAD 
b/pyperformance/_benchmarks/data/asyncio.git/ORIG_HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/ORIG_HEAD rename to pyperformance/_benchmarks/data/asyncio.git/ORIG_HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/config b/pyperformance/_benchmarks/data/asyncio.git/config similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/config rename to pyperformance/_benchmarks/data/asyncio.git/config diff --git a/pyperformance/benchmarks/data/asyncio.git/description b/pyperformance/_benchmarks/data/asyncio.git/description similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/description rename to pyperformance/_benchmarks/data/asyncio.git/description diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/applypatch-msg.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/applypatch-msg.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/applypatch-msg.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/applypatch-msg.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/commit-msg.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/commit-msg.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/commit-msg.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/commit-msg.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/post-update.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/post-update.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/post-update.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/post-update.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/pre-applypatch.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/pre-applypatch.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/pre-applypatch.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/pre-applypatch.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/pre-commit.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/pre-commit.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/pre-commit.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/pre-commit.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/pre-push.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/pre-push.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/pre-push.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/pre-push.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/pre-rebase.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/pre-rebase.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/pre-rebase.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/pre-rebase.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/prepare-commit-msg.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/prepare-commit-msg.sample similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/prepare-commit-msg.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/prepare-commit-msg.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/hooks/update.sample b/pyperformance/_benchmarks/data/asyncio.git/hooks/update.sample similarity index 
100% rename from pyperformance/benchmarks/data/asyncio.git/hooks/update.sample rename to pyperformance/_benchmarks/data/asyncio.git/hooks/update.sample diff --git a/pyperformance/benchmarks/data/asyncio.git/index b/pyperformance/_benchmarks/data/asyncio.git/index similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/index rename to pyperformance/_benchmarks/data/asyncio.git/index diff --git a/pyperformance/benchmarks/data/asyncio.git/info/exclude b/pyperformance/_benchmarks/data/asyncio.git/info/exclude similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/info/exclude rename to pyperformance/_benchmarks/data/asyncio.git/info/exclude diff --git a/pyperformance/benchmarks/data/asyncio.git/info/refs b/pyperformance/_benchmarks/data/asyncio.git/info/refs similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/info/refs rename to pyperformance/_benchmarks/data/asyncio.git/info/refs diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/HEAD b/pyperformance/_benchmarks/data/asyncio.git/logs/HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/HEAD rename to pyperformance/_benchmarks/data/asyncio.git/logs/HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/heads/master b/pyperformance/_benchmarks/data/asyncio.git/logs/refs/heads/master similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/heads/master rename to pyperformance/_benchmarks/data/asyncio.git/logs/refs/heads/master diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/HEAD b/pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/HEAD rename to pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/HEAD diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/bind_modules b/pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/bind_modules similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/bind_modules rename to pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/bind_modules diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/master b/pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/master similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/master rename to pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/master diff --git a/pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/zero_timeout b/pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/zero_timeout similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/logs/refs/remotes/origin/zero_timeout rename to pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/zero_timeout diff --git a/pyperformance/benchmarks/data/asyncio.git/objects/info/packs b/pyperformance/_benchmarks/data/asyncio.git/objects/info/packs similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/objects/info/packs rename to pyperformance/_benchmarks/data/asyncio.git/objects/info/packs diff --git a/pyperformance/benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx b/pyperformance/_benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx similarity 
index 100% rename from pyperformance/benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx rename to pyperformance/_benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx diff --git a/pyperformance/benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack b/pyperformance/_benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack rename to pyperformance/_benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack diff --git a/pyperformance/benchmarks/data/asyncio.git/packed-refs b/pyperformance/_benchmarks/data/asyncio.git/packed-refs similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/packed-refs rename to pyperformance/_benchmarks/data/asyncio.git/packed-refs diff --git a/pyperformance/benchmarks/data/asyncio.git/refs/remotes/origin/HEAD b/pyperformance/_benchmarks/data/asyncio.git/refs/remotes/origin/HEAD similarity index 100% rename from pyperformance/benchmarks/data/asyncio.git/refs/remotes/origin/HEAD rename to pyperformance/_benchmarks/data/asyncio.git/refs/remotes/origin/HEAD diff --git a/pyperformance/benchmarks/data/interpreter.tar.bz2 b/pyperformance/_benchmarks/data/interpreter.tar.bz2 similarity index 100% rename from pyperformance/benchmarks/data/interpreter.tar.bz2 rename to pyperformance/_benchmarks/data/interpreter.tar.bz2 diff --git a/pyperformance/benchmarks/data/telco-bench.b b/pyperformance/_benchmarks/data/telco-bench.b similarity index 100% rename from pyperformance/benchmarks/data/telco-bench.b rename to pyperformance/_benchmarks/data/telco-bench.b diff --git a/pyperformance/benchmarks/data/w3_tr_html5.html b/pyperformance/_benchmarks/data/w3_tr_html5.html similarity index 100% rename from pyperformance/benchmarks/data/w3_tr_html5.html rename to pyperformance/_benchmarks/data/w3_tr_html5.html diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index a760c877..5a961e1e 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -5,7 +5,8 @@ import pyperf import pyperformance -from pyperformance.benchmarks import get_benchmarks, select_benchmarks +# XXX Switch to pyperformance.benchmarks. 
+from pyperformance._benchmarks import get_benchmarks, select_benchmarks from pyperformance.compare import display_benchmark_suite from pyperformance.run import run_benchmarks @@ -13,7 +14,7 @@ def get_benchmarks_to_run(options): bench_funcs, bench_groups = get_benchmarks() should_run = select_benchmarks(options.benchmarks, bench_groups) - return (bench_funcs, bench_groups, should_run) + return (bench_funcs, should_run) def cmd_run(parser, options): @@ -33,7 +34,7 @@ def cmd_run(parser, options): if not os.path.isabs(executable): print("ERROR: \"%s\" is not an absolute path" % executable) sys.exit(1) - bench_funcs, bench_groups, should_run = get_benchmarks_to_run(options) + bench_funcs, should_run = get_benchmarks_to_run(options) cmd_prefix = [executable] suite, errors = run_benchmarks(bench_funcs, should_run, cmd_prefix, options) @@ -56,7 +57,7 @@ def cmd_run(parser, options): def cmd_list(options): - bench_funcs, bench_groups, all_funcs = get_benchmarks_to_run(options) + _, all_funcs = get_benchmarks_to_run(options) print("%r benchmarks:" % options.benchmarks) for func in sorted(all_funcs): @@ -66,7 +67,7 @@ def cmd_list(options): def cmd_list_groups(options): - bench_funcs, bench_groups = get_benchmarks() + _, bench_groups = get_benchmarks() funcs = set(bench_groups['all']) all_funcs = set(funcs) diff --git a/pyperformance/run.py b/pyperformance/run.py index 88007dc5..9670fe76 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -23,7 +23,7 @@ class BenchmarkException(Exception): def Relative(*path): - return os.path.join(PERFORMANCE_ROOT, 'benchmarks', *path) + return os.path.join(PERFORMANCE_ROOT, '_benchmarks', *path) def run_command(command, hide_stderr=True): diff --git a/setup.py b/setup.py index ee17a62d..4bf59bd2 100644 --- a/setup.py +++ b/setup.py @@ -66,9 +66,9 @@ def main(): packages = [ 'pyperformance', - 'pyperformance.benchmarks', - 'pyperformance.benchmarks.data', - 'pyperformance.benchmarks.data.2to3', + 'pyperformance._benchmarks', + 'pyperformance._benchmarks.data', + 'pyperformance._benchmarks.data.2to3', 'pyperformance.tests', 'pyperformance.tests.data', ] @@ -79,7 +79,7 @@ def main(): } # Search for all files in pyperformance/benchmarks/data/ - data_dir = os.path.join('pyperformance', 'benchmarks', 'data') + data_dir = os.path.join('pyperformance', '_benchmarks', 'data') benchmarks_data = [] for root, dirnames, filenames in os.walk(data_dir): # Strip pyperformance/benchmarks/ prefix From e16141d28f15cbc37d0a583778fbbd6e11cb8ed3 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 18 Jun 2021 17:25:27 -0600 Subject: [PATCH 002/126] Use a new API for pyperformance.benchmarks. --- pyperformance/benchmarks/__init__.py | 31 +++++++++++++++++ pyperformance/cli.py | 2 ++ pyperformance/cli_run.py | 50 +++++++++++++++------------- 3 files changed, 60 insertions(+), 23 deletions(-) create mode 100644 pyperformance/benchmarks/__init__.py diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py new file mode 100644 index 00000000..ad0010e3 --- /dev/null +++ b/pyperformance/benchmarks/__init__.py @@ -0,0 +1,31 @@ +from .. import _benchmarks + + +def load_manifest(filename): + # XXX + return filename + + +def get_benchmarks(manifest): + # XXX Pull from the manifest. + _, groups = _benchmarks.get_benchmarks() + return groups['all'] + + +def get_benchmark_groups(manifest): + # XXX Pull from the manifest. + # XXX Return more than just bench names. 
+ _, groups = _benchmarks.get_benchmarks() + return groups + + +def select_benchmarks(raw, manifest): + # XXX Pull from the manifest. + _, groups = _benchmarks.get_benchmarks() + return _benchmarks.select_benchmarks(raw, groups) + + +# XXX This should go away. +def _get_bench_funcs(manifest): + funcs, _ = _benchmarks.get_benchmarks() + return funcs diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 46d72361..f3d0b8a6 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -11,6 +11,7 @@ def comma_separated(values): def filter_opts(cmd): + cmd.add_argument("--manifest", help="benchmark manifest file to use") cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default="default", help=("Comma-separated list of benchmarks to run. Can" " contain both positive and negative arguments:" @@ -89,6 +90,7 @@ def parse_args(): cmd = subparsers.add_parser( 'list_groups', help='List benchmark groups of the running Python') cmds.append(cmd) + cmd.add_argument("--manifest", help="benchmark manifest file to use") # compile cmd = subparsers.add_parser( diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index 5a961e1e..137795d6 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -5,18 +5,17 @@ import pyperf import pyperformance -# XXX Switch to pyperformance.benchmarks. -from pyperformance._benchmarks import get_benchmarks, select_benchmarks +from pyperformance.benchmarks import ( + load_manifest, + get_benchmarks, + get_benchmark_groups, + select_benchmarks, + _get_bench_funcs, +) from pyperformance.compare import display_benchmark_suite from pyperformance.run import run_benchmarks -def get_benchmarks_to_run(options): - bench_funcs, bench_groups = get_benchmarks() - should_run = select_benchmarks(options.benchmarks, bench_groups) - return (bench_funcs, should_run) - - def cmd_run(parser, options): logging.basicConfig(level=logging.INFO) @@ -34,8 +33,13 @@ def cmd_run(parser, options): if not os.path.isabs(executable): print("ERROR: \"%s\" is not an absolute path" % executable) sys.exit(1) - bench_funcs, should_run = get_benchmarks_to_run(options) + + manifest = load_manifest(options.manifest) + should_run = select_benchmarks(options.benchmarks, manifest) + bench_funcs = _get_bench_funcs(manifest) + cmd_prefix = [executable] + # XXX We should be passing the manifest in rather than "bench_funcs". 
suite, errors = run_benchmarks(bench_funcs, should_run, cmd_prefix, options) if not suite: @@ -57,28 +61,28 @@ def cmd_run(parser, options): def cmd_list(options): - _, all_funcs = get_benchmarks_to_run(options) + manifest = load_manifest(options.manifest) + selected = select_benchmarks(options.benchmarks, manifest) print("%r benchmarks:" % options.benchmarks) - for func in sorted(all_funcs): - print("- %s" % func) + for name in sorted(selected): + print("- %s" % name) print() - print("Total: %s benchmarks" % len(all_funcs)) + print("Total: %s benchmarks" % len(selected)) def cmd_list_groups(options): - _, bench_groups = get_benchmarks() - - funcs = set(bench_groups['all']) - all_funcs = set(funcs) + manifest = load_manifest(options.manifest) + bench_groups = get_benchmark_groups(manifest) + all_benchmarks = set(get_benchmarks(manifest)) - for group, funcs in sorted(bench_groups.items()): - funcs = set(funcs) & all_funcs - if not funcs: + for group, names in sorted(bench_groups.items()): + known = set(names) & all_benchmarks + if not known: # skip empty groups continue - print("%s (%s):" % (group, len(funcs))) - for func in sorted(funcs): - print("- %s" % func) + print("%s (%s):" % (group, len(names))) + for name in sorted(names): + print("- %s" % name) print() From d2101b2aad3b075297b0210dd7c39da0c22e657e Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 21 Jun 2021 11:57:31 -0600 Subject: [PATCH 003/126] Deal with benchmark objects instead of names. --- pyperformance/benchmarks/__init__.py | 27 ++++++++++++++++----------- pyperformance/cli_run.py | 11 ++++------- pyperformance/run.py | 18 +++++++++--------- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index ad0010e3..fea7d791 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -1,15 +1,25 @@ +from collections import namedtuple + from .. import _benchmarks +Benchmark = namedtuple('Benchmark', 'name run') + + def load_manifest(filename): # XXX return filename -def get_benchmarks(manifest): +def iter_benchmarks(manifest): # XXX Pull from the manifest. - _, groups = _benchmarks.get_benchmarks() - return groups['all'] + funcs, _ = _benchmarks.get_benchmarks() + for name, func in funcs.items(): + yield Benchmark(name, func) + + +def get_benchmarks(manifest): + return list(iter_benchmarks(manifest)) def get_benchmark_groups(manifest): @@ -21,11 +31,6 @@ def get_benchmark_groups(manifest): def select_benchmarks(raw, manifest): # XXX Pull from the manifest. - _, groups = _benchmarks.get_benchmarks() - return _benchmarks.select_benchmarks(raw, groups) - - -# XXX This should go away. 
-def _get_bench_funcs(manifest): - funcs, _ = _benchmarks.get_benchmarks() - return funcs + funcs, groups = _benchmarks.get_benchmarks() + for name in _benchmarks.select_benchmarks(raw, groups): + yield Benchmark(name, funcs[name]) diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index 137795d6..16e62690 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -10,7 +10,6 @@ get_benchmarks, get_benchmark_groups, select_benchmarks, - _get_bench_funcs, ) from pyperformance.compare import display_benchmark_suite from pyperformance.run import run_benchmarks @@ -36,11 +35,9 @@ def cmd_run(parser, options): manifest = load_manifest(options.manifest) should_run = select_benchmarks(options.benchmarks, manifest) - bench_funcs = _get_bench_funcs(manifest) cmd_prefix = [executable] - # XXX We should be passing the manifest in rather than "bench_funcs". - suite, errors = run_benchmarks(bench_funcs, should_run, cmd_prefix, options) + suite, errors = run_benchmarks(should_run, cmd_prefix, options) if not suite: print("ERROR: No benchmark was run") @@ -65,8 +62,8 @@ def cmd_list(options): selected = select_benchmarks(options.benchmarks, manifest) print("%r benchmarks:" % options.benchmarks) - for name in sorted(selected): - print("- %s" % name) + for bench in sorted(selected): + print("- %s" % bench.name) print() print("Total: %s benchmarks" % len(selected)) @@ -74,7 +71,7 @@ def cmd_list(options): def cmd_list_groups(options): manifest = load_manifest(options.manifest) bench_groups = get_benchmark_groups(manifest) - all_benchmarks = set(get_benchmarks(manifest)) + all_benchmarks = set(b.name for b in get_benchmarks(manifest)) for group, names in sorted(bench_groups.items()): known = set(names) & all_benchmarks diff --git a/pyperformance/run.py b/pyperformance/run.py index 9670fe76..f77bfdb3 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -97,14 +97,14 @@ def run_perf_script(python, options, name, extra_args=[]): return pyperf.BenchmarkSuite.load(tmp) -def run_benchmarks(bench_funcs, should_run, cmd_prefix, options): +def run_benchmarks(should_run, cmd_prefix, options): suite = None to_run = sorted(should_run) run_count = str(len(to_run)) errors = [] - for index, name in enumerate(to_run): - func = bench_funcs[name] + for index, bench in enumerate(to_run): + name = bench.name print("[%s/%s] %s..." % (str(index + 1).rjust(len(run_count)), run_count, name)) sys.stdout.flush() @@ -116,24 +116,24 @@ def add_bench(dest_suite, obj): benchmarks = (obj,) version = pyperformance.__version__ - for bench in benchmarks: - bench.update_metadata({'performance_version': version}) + for res in benchmarks: + res.update_metadata({'performance_version': version}) if dest_suite is not None: - dest_suite.add_benchmark(bench) + dest_suite.add_benchmark(res) else: - dest_suite = pyperf.BenchmarkSuite([bench]) + dest_suite = pyperf.BenchmarkSuite([res]) return dest_suite try: - bench = func(cmd_prefix, options) + result = bench.run(cmd_prefix, options) except Exception as exc: print("ERROR: Benchmark %s failed: %s" % (name, exc)) traceback.print_exc() errors.append(name) else: - suite = add_bench(suite, bench) + suite = add_bench(suite, result) print() From 20b2c2329d3ec225262c162b40f3885ab7775c41 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 21 Jun 2021 15:24:29 -0600 Subject: [PATCH 004/126] Refactor select_benchmarks(). 
--- pyperformance/_benchmarks/__init__.py | 49 ------------------------- pyperformance/benchmark/__init__.py | 47 ++++++++++++++++++++++++ pyperformance/benchmarks/__init__.py | 52 ++++++++++++++++++++------- pyperformance/cli.py | 1 + pyperformance/cli_run.py | 31 ++++++++++++++-- 5 files changed, 117 insertions(+), 63 deletions(-) create mode 100644 pyperformance/benchmark/__init__.py diff --git a/pyperformance/_benchmarks/__init__.py b/pyperformance/_benchmarks/__init__.py index ef391639..5091cf45 100644 --- a/pyperformance/_benchmarks/__init__.py +++ b/pyperformance/_benchmarks/__init__.py @@ -302,52 +302,3 @@ def get_benchmarks(): bench_groups["all"] = sorted(bench_funcs) return (bench_funcs, bench_groups) - - -def expand_benchmark_name(bm_name, bench_groups): - """Recursively expand name benchmark names. - - Args: - bm_name: string naming a benchmark or benchmark group. - - Yields: - Names of actual benchmarks, with all group names fully expanded. - """ - expansion = bench_groups.get(bm_name) - if expansion: - for name in expansion: - for name in expand_benchmark_name(name, bench_groups): - yield name - else: - yield bm_name - - -def select_benchmarks(benchmarks, bench_groups): - legal_benchmarks = bench_groups["all"] - benchmarks = benchmarks.split(",") - positive_benchmarks = set(bm.lower() - for bm in benchmarks - if bm and not bm.startswith("-")) - negative_benchmarks = set(bm[1:].lower() - for bm in benchmarks - if bm and bm.startswith("-")) - - should_run = set() - if not positive_benchmarks: - should_run = set(expand_benchmark_name("default", bench_groups)) - - for name in positive_benchmarks: - for bm in expand_benchmark_name(name, bench_groups): - if bm not in legal_benchmarks: - logging.warning("No benchmark named %s", bm) - else: - should_run.add(bm) - - for bm in negative_benchmarks: - if bm in bench_groups: - raise ValueError("Negative groups not supported: -%s" % bm) - elif bm not in legal_benchmarks: - logging.warning("No benchmark named %s", bm) - else: - should_run.remove(bm) - return should_run diff --git a/pyperformance/benchmark/__init__.py b/pyperformance/benchmark/__init__.py new file mode 100644 index 00000000..fe716836 --- /dev/null +++ b/pyperformance/benchmark/__init__.py @@ -0,0 +1,47 @@ +from collections import namedtuple +import types + + +BenchmarkSpec = namedtuple('BenchmarkSpec', 'name version origin') + + +def parse_benchmark(entry): + name = entry + version = None + origin = None + if not f'_{name}'.isidentifier(): + raise ValueError(f'unsupported benchmark name in {entry!r}') + return BenchmarkSpec(name, version, origin) + + +class Benchmark: + + def __init__(self, spec, run): + if isinstance(spec, str): + spec = parse_benchmark(spec) + + self.spec = spec + self.run = run + + def __repr__(self): + return f'{type(self).__name__}(spec={self.spec}, run={self.run})' + + def __getattr__(self, name): + return getattr(self.spec, name) + + def __hash__(self): + return hash(self.spec) + + def __eq__(self, other): + try: + other_spec = other.spec + except AttributeError: + return NotImplemented + return self.spec == other_spec + + def __gt__(self, other): + try: + other_spec = other.spec + except AttributeError: + return NotImplemented + return self.spec > other_spec diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index fea7d791..7b2eb09c 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -1,9 +1,5 @@ -from collections import namedtuple - -from .. 
import _benchmarks - - -Benchmark = namedtuple('Benchmark', 'name run') +from .. import _benchmarks, benchmark as _benchmark +from ._parse import parse_benchmarks def load_manifest(filename): @@ -15,7 +11,7 @@ def iter_benchmarks(manifest): # XXX Pull from the manifest. funcs, _ = _benchmarks.get_benchmarks() for name, func in funcs.items(): - yield Benchmark(name, func) + yield _benchmark.Benchmark(name, func) def get_benchmarks(manifest): @@ -29,8 +25,40 @@ def get_benchmark_groups(manifest): return groups -def select_benchmarks(raw, manifest): - # XXX Pull from the manifest. - funcs, groups = _benchmarks.get_benchmarks() - for name in _benchmarks.select_benchmarks(raw, groups): - yield Benchmark(name, funcs[name]) +def expand_benchmark_groups(parsed, groups): + if isinstance(parsed, str): + parsed = _benchmark.parse_benchmark(parsed) + + if not groups: + yield parsed + elif parsed.name not in groups: + yield parsed + else: + benchmarks = groups[parsed.name] + for bench in benchmarks or (): + yield from expand_benchmark_groups(bench, groups) + + +def select_benchmarks(raw, manifest, *, + expand=None, + known=None, + ): + if expand is None: + groups = get_benchmark_groups(manifest) + expand = lambda n: expand_benchmark_groups(n, groups) + if known is None: + known = get_benchmarks(manifest) + benchmarks = {b.spec: b for b in get_benchmarks(manifest)} + + included, excluded = parse_benchmarks(raw, expand=expand, known=known) + if not included: + included = set(expand('default', 'add')) + + selected = set() + for spec in included: + bench = benchmarks[spec] + selected.add(bench) + for spec in excluded: + bench = benchmarks[spec] + selected.remove(bench) + return selected diff --git a/pyperformance/cli.py b/pyperformance/cli.py index f3d0b8a6..42396ec6 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -13,6 +13,7 @@ def comma_separated(values): def filter_opts(cmd): cmd.add_argument("--manifest", help="benchmark manifest file to use") cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default="default", + type=(lambda b: b.lower()), help=("Comma-separated list of benchmarks to run. Can" " contain both positive and negative arguments:" " --benchmarks=run_this,also_this,-not_this. 
If" diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index 16e62690..23278faa 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -5,16 +5,43 @@ import pyperf import pyperformance +from pyperformance.benchmark import parse_benchmark from pyperformance.benchmarks import ( load_manifest, + iter_benchmarks, get_benchmarks, get_benchmark_groups, + expand_benchmark_groups, select_benchmarks, ) from pyperformance.compare import display_benchmark_suite from pyperformance.run import run_benchmarks +def _select_benchmarks(raw, manifest): + groups = get_benchmark_groups(manifest) + def expand(parsed, op): + if isinstance(parsed, str): + parsed = parse_benchmark(parsed) + if parsed.name in groups and op == 'remove': + raise ValueError(f'negative groups not supported: -{parsed.name}') + yield from expand_benchmark_groups(parsed, groups) + + known = set(b.spec for b in iter_benchmarks(manifest)) + def check_known(parsed, raw): + if parsed not in known: + logging.warning(f"no benchmark named {parsed.name!r}") + return False + return True + + return select_benchmarks( + raw, + manifest, + expand=expand, + known=check_known, + ) + + def cmd_run(parser, options): logging.basicConfig(level=logging.INFO) @@ -34,7 +61,7 @@ def cmd_run(parser, options): sys.exit(1) manifest = load_manifest(options.manifest) - should_run = select_benchmarks(options.benchmarks, manifest) + should_run = _select_benchmarks(options.benchmarks, manifest) cmd_prefix = [executable] suite, errors = run_benchmarks(should_run, cmd_prefix, options) @@ -59,7 +86,7 @@ def cmd_run(parser, options): def cmd_list(options): manifest = load_manifest(options.manifest) - selected = select_benchmarks(options.benchmarks, manifest) + selected = _select_benchmarks(options.benchmarks, manifest) print("%r benchmarks:" % options.benchmarks) for bench in sorted(selected): From 20e84928361b0a03a3680fe590e6f8d5b3a6785a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 21 Jun 2021 17:44:19 -0600 Subject: [PATCH 005/126] Add and use the default manifest file. 
--- pyperformance/_benchmarks/MANIFEST | 148 ++++++++++++++++++++++++++ pyperformance/_benchmarks/__init__.py | 24 ++++- pyperformance/benchmark/__init__.py | 5 +- pyperformance/benchmarks/__init__.py | 42 ++++++-- pyperformance/benchmarks/_parse.py | 53 +++++++++ pyperformance/benchmarks/manifest.py | 128 ++++++++++++++++++++++ pyperformance/cli_run.py | 18 ++-- pyperformance/run.py | 18 ---- 8 files changed, 394 insertions(+), 42 deletions(-) create mode 100644 pyperformance/_benchmarks/MANIFEST create mode 100644 pyperformance/benchmarks/_parse.py create mode 100644 pyperformance/benchmarks/manifest.py diff --git a/pyperformance/_benchmarks/MANIFEST b/pyperformance/_benchmarks/MANIFEST new file mode 100644 index 00000000..4c76dc1e --- /dev/null +++ b/pyperformance/_benchmarks/MANIFEST @@ -0,0 +1,148 @@ +[benchmarks] + +name version origin metafile +2to3 - - - +chameleon - - - +chaos - - - +crypto_pyaes - - - +deltablue - - - +django_template - - - +dulwich_log - - - +fannkuch - - - +float - - - +genshi - - - +go - - - +hexiom - - - +# FIXME: this benchmark fails with: +# Unable to get the program 'hg' from the virtual environment +#hg_startup - - - +#html5lib - - - +json_dumps - - - +json_loads - - - +logging - - - +mako - - - +mdp - - - +meteor_contest - - - +nbody - - - +nqueens - - - +pathlib - - - +pickle - - - +pickle_dict - - - +pickle_list - - - +pickle_pure_python - - - +pidigits - - - +pyflate - - - +python_startup - - - +python_startup_no_site - - - +raytrace - - - +regex_compile - - - +regex_dna - - - +regex_effbot - - - +regex_v8 - - - +richards - - - +scimark - - - +spectral_norm - - - +sqlalchemy_declarative - - - +sqlalchemy_imperative - - - +sqlite_synth - - - +sympy - - - +telco - - - +tornado_http - - - +unpack_sequence - - - +unpickle - - - +unpickle_list - - - +unpickle_pure_python - - - +xml_etree - - - + + +[group default] +2to3 +chameleon +chaos +crypto_pyaes +deltablue +django_template +dulwich_log +fannkuch +float +genshi +go +hexiom +#hg_startup +#html5lib +json_dumps +json_loads +logging +mako +meteor_contest +nbody +nqueens +pathlib +pickle +pickle_dict +pickle_list +pickle_pure_python +pidigits +pyflate +python_startup +python_startup_no_site +raytrace +regex_compile +regex_dna +regex_effbot +regex_v8 +richards +scimark +spectral_norm +sqlalchemy_declarative +sqlalchemy_imperative +sqlite_synth +sympy +telco +tornado_http +unpack_sequence +unpickle +unpickle_list +unpickle_pure_python +xml_etree + + +[group startup] +python_startup +python_startup_no_site +#hg_startup + + +[group regex] +regex_v8 +regex_effbot +regex_compile +regex_dna + + +[group serialize] +pickle_pure_python +unpickle_pure_python # Not for Python 3 +pickle +unpickle +xml_etree +json_dumps +json_loads + + +[group apps] +2to3 +chameleon +#html5lib +tornado_http + + +[group math] +float +nbody +pidigits + + +[group template] +django_template +mako diff --git a/pyperformance/_benchmarks/__init__.py b/pyperformance/_benchmarks/__init__.py index 5091cf45..cfe73231 100644 --- a/pyperformance/_benchmarks/__init__.py +++ b/pyperformance/_benchmarks/__init__.py @@ -1,8 +1,3 @@ -import logging - -from pyperformance.run import run_perf_script - - # Benchmark groups. The "default" group is what's run if no -b option is # specified. 
DEFAULT_GROUP = [ @@ -83,6 +78,24 @@ } +def Relative(*path): + return os.path.join(PERFORMANCE_ROOT, '_benchmarks', *path) + + +def run_perf_script(python, options, name, extra_args=[]): + bm_path = Relative("bm_%s.py" % name) + cmd = list(python) + cmd.append('-u') + cmd.append(bm_path) + cmd.extend(extra_args) + copy_perf_options(cmd, options) + + with temporary_file() as tmp: + cmd.extend(('--output', tmp)) + run_command(cmd, hide_stderr=not options.verbose) + return pyperf.BenchmarkSuite.load(tmp) + + def BM_2to3(python, options): return run_perf_script(python, options, "2to3") @@ -291,6 +304,7 @@ def BM_mdp(python, options): # End benchmarks, begin main entry point support. + def get_benchmarks(): bench_funcs = dict((name[3:].lower(), func) for name, func in globals().items() diff --git a/pyperformance/benchmark/__init__.py b/pyperformance/benchmark/__init__.py index fe716836..17e860e0 100644 --- a/pyperformance/benchmark/__init__.py +++ b/pyperformance/benchmark/__init__.py @@ -2,16 +2,17 @@ import types -BenchmarkSpec = namedtuple('BenchmarkSpec', 'name version origin') +BenchmarkSpec = namedtuple('BenchmarkSpec', 'name version origin metafile') def parse_benchmark(entry): name = entry version = None origin = None + metafile = None if not f'_{name}'.isidentifier(): raise ValueError(f'unsupported benchmark name in {entry!r}') - return BenchmarkSpec(name, version, origin) + return BenchmarkSpec(name, version, origin, metafile) class Benchmark: diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index 7b2eb09c..9b34fdca 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -1,17 +1,42 @@ +import os.path + +from .. import __version__ from .. import _benchmarks, benchmark as _benchmark from ._parse import parse_benchmarks +from . import manifest as _manifest + + +DEFAULTS_DIR = os.path.join( + os.path.dirname(os.path.dirname(__file__)), + '_benchmarks') +DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST') -def load_manifest(filename): - # XXX - return filename +def load_manifest(filename, *, resolve=None): + if not filename: + filename = DEFAULT_MANIFEST + if resolve is None: + def resolve(bench): + if not bench.version: + bench = bench._replace(version=__version__) + if not bench.origin: + bench = bench._replace(origin='') + if not bench.metafile: + metafile = os.path.join(DEFAULTS_DIR, + f'bm_{bench.name}', + 'METADATA') + bench = bench._replace(metafile=metafile) + return bench + with open(filename) as infile: + return _manifest.parse_manifest(infile, resolve=resolve) def iter_benchmarks(manifest): - # XXX Pull from the manifest. + # XXX Use the benchmark's "run" script. funcs, _ = _benchmarks.get_benchmarks() - for name, func in funcs.items(): - yield _benchmark.Benchmark(name, func) + for spec in manifest.benchmarks: + func = funcs[spec.name] + yield _benchmark.Benchmark(spec, func) def get_benchmarks(manifest): @@ -19,10 +44,7 @@ def get_benchmarks(manifest): def get_benchmark_groups(manifest): - # XXX Pull from the manifest. - # XXX Return more than just bench names. 
- _, groups = _benchmarks.get_benchmarks() - return groups + return dict(manifest.groups) def expand_benchmark_groups(parsed, groups): diff --git a/pyperformance/benchmarks/_parse.py b/pyperformance/benchmarks/_parse.py new file mode 100644 index 00000000..e23b27a9 --- /dev/null +++ b/pyperformance/benchmarks/_parse.py @@ -0,0 +1,53 @@ +from ..benchmark import parse_benchmark + + +def parse_benchmarks(raw, *, expand=None, known=None): + included = set() + excluded = set() + for op, _, parsed in iter_parsed(raw, expand=expand, known=known): + if op == 'add': + included.add(parsed) + elif op == 'remove': + excluded.add(parsed) + else: + raise NotImplementedError(op) + return included, excluded + + +def iter_parsed(raw, *, expand=None, known=None): + if expand is None: + def expand(parsed, _op): + yield parsed + check_known = _resolve_check_known(known) + + if isinstance(raw, str): + raw = raw.split(',') + entries = (e.strip() for e in raw) + + for entry in entries: + if not entry: + continue + + op = 'add' + if entry.startswith('-'): + op = 'remove' + entry = entry[1:] + + parsed = parse_benchmark(entry) + for expanded in expand(parsed, op): + if check_known(expanded, entry): + yield op, entry, expanded + + +def _resolve_check_known(known): + if callable(known): + return known + elif not known: + return (lambda p: True) + else: + known = set(known) + def check_known(parsed, raw): + if parsed not in known: + raise ValueError(f'unknown benchmark {parsed!r} ({raw})') + return True + return check_known diff --git a/pyperformance/benchmarks/manifest.py b/pyperformance/benchmarks/manifest.py new file mode 100644 index 00000000..f0337227 --- /dev/null +++ b/pyperformance/benchmarks/manifest.py @@ -0,0 +1,128 @@ +from collections import namedtuple + +from .. 
import benchmark as _benchmark + + +BENCH_COLUMNS = ('name', 'version', 'origin', 'metafile') +BENCH_HEADER = '\t'.join(BENCH_COLUMNS) + + +BenchmarksManifest = namedtuple('BenchmarksManifest', 'benchmarks groups') + + +def parse_manifest(text, *, resolve=None): + if isinstance(text, str): + lines = text.splitlines() + else: + lines = iter(text) + + benchmarks = None + groups = {} + for section, seclines in _iter_sections(lines): + if section == 'benchmarks': + benchmarks = _parse_benchmarks(seclines, resolve) + elif benchmarks is None: + raise ValueError('invalid manifest file, expected "benchmarks" section') + elif section.startswith('group '): + _, _, group = section.partition(' ') + groups[group] = _parse_group(group, seclines, benchmarks) + return BenchmarksManifest(benchmarks, groups) + + +def _iter_sections(lines): + lines = (line.split('#')[0].strip() + for line in lines) + + name = None + section = None + for line in lines: + if not line: + continue + if line.startswith('[') and line.endswith(']'): + if name: + yield name, section + name = line[1:-1].strip() + section = [] + else: + if not name: + raise ValueError(f'expected new section, got {line!r}') + section.append(line) + if name: + yield name, section + else: + raise ValueError('invalid manifest file, no sections found') + + +def _parse_benchmarks(lines, resolve): + if not lines: + lines = [''] + lines = iter(lines) + if next(lines) != BENCH_HEADER: + raise ValueError('invalid manifest file, expected benchmarks header') + + benchmarks = [] + for line in lines: + try: + name, version, origin, metafile = line.split('\t') + except ValueError: + raise ValueError(f'bad benchmark line {line!r}') + if not version or version == '-': + version = None + if not origin or origin == '-': + origin = None + if not metafile or metafile == '-': + metafile = None + bench = _benchmark.BenchmarkSpec(name, version, origin, metafile) + if resolve is not None: + bench = resolve(bench) + benchmarks.append(bench) + return benchmarks + + +def _parse_group(name, lines, benchmarks): + benchmarks = set(benchmarks) + byname = {b.name: b for b in benchmarks} + group = [] + for line in lines: + bench = _benchmark.parse_benchmark(line) + if bench not in benchmarks: + try: + bench = byname[bench.name] + except KeyError: + raise ValueError(f'unknown benchmark {bench.name!r} ({name})') + group.append(bench) + return group + + +#def render_manifest(manifest): +# if isinstance(manifest, str): +# raise NotImplementedError +# manifest = manifest.splitlines() +# yield BENCH_HEADER +# for row in manifest: +# if isinstance(row, str): +# row = _parse_manifest_row(row) +# if isinstance(row, str): +# yield row +# continue +# line _render_manifest_row(row) +# +# raise NotImplementedError +# +# +#def parse_group_manifest(text): +# ... +# +# +#def render_group_manifest(group, benchmarks): +# # (manifest file, bm name) +# ... 
+# +# +#def parse_bench_from_manifest(line): +# raise NotImplementedError +# +# +#def render_bench_for_manifest(benchmark, columns): +# raise NotImplementedError +# name, origin, version, metafile = info diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index 23278faa..ea9e6389 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -86,7 +86,10 @@ def cmd_run(parser, options): def cmd_list(options): manifest = load_manifest(options.manifest) - selected = _select_benchmarks(options.benchmarks, manifest) + if 'all' in options.benchmarks.split(','): + selected = manifest.benchmarks + else: + selected = _select_benchmarks(options.benchmarks, manifest) print("%r benchmarks:" % options.benchmarks) for bench in sorted(selected): @@ -98,15 +101,16 @@ def cmd_list(options): def cmd_list_groups(options): manifest = load_manifest(options.manifest) bench_groups = get_benchmark_groups(manifest) - all_benchmarks = set(b.name for b in get_benchmarks(manifest)) + bench_groups['all'] = list(manifest.benchmarks) + all_benchmarks = set(manifest.benchmarks) - for group, names in sorted(bench_groups.items()): - known = set(names) & all_benchmarks + for group, specs in sorted(bench_groups.items()): + known = set(specs) & all_benchmarks if not known: # skip empty groups continue - print("%s (%s):" % (group, len(names))) - for name in sorted(names): - print("- %s" % name) + print("%s (%s):" % (group, len(specs))) + for spec in sorted(specs): + print("- %s" % spec.name) print() diff --git a/pyperformance/run.py b/pyperformance/run.py index f77bfdb3..fb267b3f 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -22,10 +22,6 @@ class BenchmarkException(Exception): # Utility functions -def Relative(*path): - return os.path.join(PERFORMANCE_ROOT, '_benchmarks', *path) - - def run_command(command, hide_stderr=True): if hide_stderr: kw = {'stderr': subprocess.PIPE} @@ -83,20 +79,6 @@ def copy_perf_options(cmd, options): cmd.append('--inherit-environ=%s' % ','.join(options.inherit_environ)) -def run_perf_script(python, options, name, extra_args=[]): - bm_path = Relative("bm_%s.py" % name) - cmd = list(python) - cmd.append('-u') - cmd.append(bm_path) - cmd.extend(extra_args) - copy_perf_options(cmd, options) - - with temporary_file() as tmp: - cmd.extend(('--output', tmp)) - run_command(cmd, hide_stderr=not options.verbose) - return pyperf.BenchmarkSuite.load(tmp) - - def run_benchmarks(should_run, cmd_prefix, options): suite = None to_run = sorted(should_run) From 4f74cb8a802835e1e1bc015a9698585730381cf6 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 22 Jun 2021 11:01:30 -0600 Subject: [PATCH 006/126] Move run_perf_script() back. --- pyperformance/_benchmarks/__init__.py | 21 +++------------------ pyperformance/run.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/pyperformance/_benchmarks/__init__.py b/pyperformance/_benchmarks/__init__.py index cfe73231..4671d89d 100644 --- a/pyperformance/_benchmarks/__init__.py +++ b/pyperformance/_benchmarks/__init__.py @@ -1,3 +1,6 @@ +from ..run import run_perf_script + + # Benchmark groups. The "default" group is what's run if no -b option is # specified. 
DEFAULT_GROUP = [ @@ -78,24 +81,6 @@ } -def Relative(*path): - return os.path.join(PERFORMANCE_ROOT, '_benchmarks', *path) - - -def run_perf_script(python, options, name, extra_args=[]): - bm_path = Relative("bm_%s.py" % name) - cmd = list(python) - cmd.append('-u') - cmd.append(bm_path) - cmd.extend(extra_args) - copy_perf_options(cmd, options) - - with temporary_file() as tmp: - cmd.extend(('--output', tmp)) - run_command(cmd, hide_stderr=not options.verbose) - return pyperf.BenchmarkSuite.load(tmp) - - def BM_2to3(python, options): return run_perf_script(python, options, "2to3") diff --git a/pyperformance/run.py b/pyperformance/run.py index fb267b3f..37e7aca3 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -60,6 +60,24 @@ def run_command(command, hide_stderr=True): raise RuntimeError("Benchmark died") +def Relative(*path): + return os.path.join(PERFORMANCE_ROOT, '_benchmarks', *path) + + +def run_perf_script(python, options, name, extra_args=[]): + bm_path = Relative("bm_%s.py" % name) + cmd = list(python) + cmd.append('-u') + cmd.append(bm_path) + cmd.extend(extra_args) + copy_perf_options(cmd, options) + + with temporary_file() as tmp: + cmd.extend(('--output', tmp)) + run_command(cmd, hide_stderr=not options.verbose) + return pyperf.BenchmarkSuite.load(tmp) + + def copy_perf_options(cmd, options): if options.debug_single_value: cmd.append('--debug-single-value') From 3917d22030cccf8b9382102e27d59758d3822c40 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 22 Jun 2021 11:25:41 -0600 Subject: [PATCH 007/126] Clean up benchmark/__init__.py. --- pyperformance/benchmark/__init__.py | 50 ++----------------- pyperformance/benchmark/_benchmark.py | 34 +++++++++++++ pyperformance/benchmark/_spec.py | 14 ++++++ pyperformance/benchmarks/__init__.py | 4 +- .../benchmarks/{manifest.py => _manifest.py} | 0 5 files changed, 54 insertions(+), 48 deletions(-) create mode 100644 pyperformance/benchmark/_benchmark.py create mode 100644 pyperformance/benchmark/_spec.py rename pyperformance/benchmarks/{manifest.py => _manifest.py} (100%) diff --git a/pyperformance/benchmark/__init__.py b/pyperformance/benchmark/__init__.py index 17e860e0..0d3bb5ae 100644 --- a/pyperformance/benchmark/__init__.py +++ b/pyperformance/benchmark/__init__.py @@ -1,48 +1,4 @@ -from collections import namedtuple -import types - -BenchmarkSpec = namedtuple('BenchmarkSpec', 'name version origin metafile') - - -def parse_benchmark(entry): - name = entry - version = None - origin = None - metafile = None - if not f'_{name}'.isidentifier(): - raise ValueError(f'unsupported benchmark name in {entry!r}') - return BenchmarkSpec(name, version, origin, metafile) - - -class Benchmark: - - def __init__(self, spec, run): - if isinstance(spec, str): - spec = parse_benchmark(spec) - - self.spec = spec - self.run = run - - def __repr__(self): - return f'{type(self).__name__}(spec={self.spec}, run={self.run})' - - def __getattr__(self, name): - return getattr(self.spec, name) - - def __hash__(self): - return hash(self.spec) - - def __eq__(self, other): - try: - other_spec = other.spec - except AttributeError: - return NotImplemented - return self.spec == other_spec - - def __gt__(self, other): - try: - other_spec = other.spec - except AttributeError: - return NotImplemented - return self.spec > other_spec +# aliases +from ._spec import BenchmarkSpec, parse_benchmark +from ._benchmark import Benchmark diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py new file mode 100644 index 
00000000..e1c8b6b2 --- /dev/null +++ b/pyperformance/benchmark/_benchmark.py @@ -0,0 +1,34 @@ +from ._spec import parse_benchmark + + +class Benchmark: + + def __init__(self, spec, run): + if isinstance(spec, str): + spec = parse_benchmark(spec) + + self.spec = spec + self.run = run + + def __repr__(self): + return f'{type(self).__name__}(spec={self.spec}, run={self.run})' + + def __getattr__(self, name): + return getattr(self.spec, name) + + def __hash__(self): + return hash(self.spec) + + def __eq__(self, other): + try: + other_spec = other.spec + except AttributeError: + return NotImplemented + return self.spec == other_spec + + def __gt__(self, other): + try: + other_spec = other.spec + except AttributeError: + return NotImplemented + return self.spec > other_spec diff --git a/pyperformance/benchmark/_spec.py b/pyperformance/benchmark/_spec.py new file mode 100644 index 00000000..1de1d26f --- /dev/null +++ b/pyperformance/benchmark/_spec.py @@ -0,0 +1,14 @@ +from collections import namedtuple + + +BenchmarkSpec = namedtuple('BenchmarkSpec', 'name version origin metafile') + + +def parse_benchmark(entry): + name = entry + version = None + origin = None + metafile = None + if not f'_{name}'.isidentifier(): + raise ValueError(f'unsupported benchmark name in {entry!r}') + return BenchmarkSpec(name, version, origin, metafile) diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index 9b34fdca..0d1d6896 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -2,8 +2,10 @@ from .. import __version__ from .. import _benchmarks, benchmark as _benchmark +from . import _manifest + +# an alias (but also used here) from ._parse import parse_benchmarks -from . import manifest as _manifest DEFAULTS_DIR = os.path.join( diff --git a/pyperformance/benchmarks/manifest.py b/pyperformance/benchmarks/_manifest.py similarity index 100% rename from pyperformance/benchmarks/manifest.py rename to pyperformance/benchmarks/_manifest.py From 32e15ecb178e6e15d64bf05c8e19aaaad1b58fef Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 22 Jun 2021 17:11:50 -0600 Subject: [PATCH 008/126] Make the utils a package. 
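utils.py becomes a private _utils package split by concern (_fs.py, _platform.py), with the public names re-exported from _utils/__init__.py so call sites only change their import path. A small sketch, not part of the diff below, of the resulting import surface:

    from pyperformance._utils import MS_WINDOWS, temporary_file

    with temporary_file() as tmp:
        # tmp is the path of a scratch file (see _utils/_fs.py);
        # run_perf_script() uses the same helper for its --output file.
        ...
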
--- pyperformance/_utils/__init__.py | 9 +++++++++ pyperformance/{utils.py => _utils/_fs.py} | 4 ---- pyperformance/_utils/_platform.py | 5 +++++ pyperformance/compile.py | 2 +- pyperformance/run.py | 2 +- 5 files changed, 16 insertions(+), 6 deletions(-) create mode 100644 pyperformance/_utils/__init__.py rename pyperformance/{utils.py => _utils/_fs.py} (86%) create mode 100644 pyperformance/_utils/_platform.py diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py new file mode 100644 index 00000000..b6081744 --- /dev/null +++ b/pyperformance/_utils/__init__.py @@ -0,0 +1,9 @@ + +######### +# aliases +from ._fs import ( + temporary_file, +) +from ._platform import ( + MS_WINDOWS, +) diff --git a/pyperformance/utils.py b/pyperformance/_utils/_fs.py similarity index 86% rename from pyperformance/utils.py rename to pyperformance/_utils/_fs.py index 0d6abdbc..3ef507f8 100644 --- a/pyperformance/utils.py +++ b/pyperformance/_utils/_fs.py @@ -1,13 +1,9 @@ import contextlib import errno import os -import sys import tempfile -MS_WINDOWS = (sys.platform == 'win32') - - @contextlib.contextmanager def temporary_file(): tmp_filename = tempfile.mktemp() diff --git a/pyperformance/_utils/_platform.py b/pyperformance/_utils/_platform.py new file mode 100644 index 00000000..e1b4390c --- /dev/null +++ b/pyperformance/_utils/_platform.py @@ -0,0 +1,5 @@ +import sys + + +MS_WINDOWS = (sys.platform == 'win32') + diff --git a/pyperformance/compile.py b/pyperformance/compile.py index 34ea46ca..46fd15eb 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -18,7 +18,7 @@ from urllib.request import urlopen import pyperformance -from pyperformance.utils import MS_WINDOWS +from pyperformance._utils import MS_WINDOWS from pyperformance.venv import (GET_PIP_URL, REQ_OLD_PIP, PERFORMANCE_ROOT, download, is_build_dir) diff --git a/pyperformance/run.py b/pyperformance/run.py index 37e7aca3..295f50b1 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -11,7 +11,7 @@ import pyperf import pyperformance -from pyperformance.utils import temporary_file +from pyperformance._utils import temporary_file from pyperformance.venv import PERFORMANCE_ROOT From b041e2e181835a3cdb07d7d50bba726baa8790e6 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 23 Jun 2021 10:58:24 -0600 Subject: [PATCH 009/126] Move each of the default benchmarks into its own directory. 
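Each default benchmark now gets its own bm_<name>/ directory: the script becomes bm_<name>/run.py, its data/ moves alongside it, and run_perf_script() (patched at the end of this diff) builds the new path. An illustration, not part of the diff, using nbody as an example (Relative() joins against the _benchmarks directory):

    from pyperformance.run import Relative

    name = "nbody"
    old_path = Relative("bm_%s.py" % name)         # _benchmarks/bm_nbody.py (before)
    new_path = Relative("bm_%s" % name, "run.py")  # _benchmarks/bm_nbody/run.py (after)
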
--- .../_benchmarks/{ => bm_2to3}/data/2to3/README.txt | 0 .../{ => bm_2to3}/data/2to3/__init__.py.txt | 0 .../data/2to3/context_processors.py.txt | 0 .../{ => bm_2to3}/data/2to3/exceptions.py.txt | 0 .../_benchmarks/{ => bm_2to3}/data/2to3/mail.py.txt | 0 .../{ => bm_2to3}/data/2to3/paginator.py.txt | 0 .../{ => bm_2to3}/data/2to3/signals.py.txt | 0 .../{ => bm_2to3}/data/2to3/template_loader.py.txt | 0 .../{ => bm_2to3}/data/2to3/urlresolvers.py.txt | 0 .../{ => bm_2to3}/data/2to3/xheaders.py.txt | 0 .../_benchmarks/{bm_2to3.py => bm_2to3/run.py} | 0 .../{bm_chameleon.py => bm_chameleon/run.py} | 0 .../_benchmarks/{bm_chaos.py => bm_chaos/run.py} | 0 .../{bm_crypto_pyaes.py => bm_crypto_pyaes/run.py} | 0 .../{bm_deltablue.py => bm_deltablue/run.py} | 0 .../run.py} | 0 .../data/asyncio.git/COMMIT_EDITMSG | 0 .../data/asyncio.git/FETCH_HEAD | 0 .../{ => bm_dulwich_log}/data/asyncio.git/HEAD | 0 .../{ => bm_dulwich_log}/data/asyncio.git/ORIG_HEAD | 0 .../{ => bm_dulwich_log}/data/asyncio.git/config | 0 .../data/asyncio.git/description | 0 .../data/asyncio.git/hooks/applypatch-msg.sample | 0 .../data/asyncio.git/hooks/commit-msg.sample | 0 .../data/asyncio.git/hooks/post-update.sample | 0 .../data/asyncio.git/hooks/pre-applypatch.sample | 0 .../data/asyncio.git/hooks/pre-commit.sample | 0 .../data/asyncio.git/hooks/pre-push.sample | 0 .../data/asyncio.git/hooks/pre-rebase.sample | 0 .../asyncio.git/hooks/prepare-commit-msg.sample | 0 .../data/asyncio.git/hooks/update.sample | 0 .../{ => bm_dulwich_log}/data/asyncio.git/index | Bin .../data/asyncio.git/info/exclude | 0 .../{ => bm_dulwich_log}/data/asyncio.git/info/refs | 0 .../{ => bm_dulwich_log}/data/asyncio.git/logs/HEAD | 0 .../data/asyncio.git/logs/refs/heads/master | 0 .../data/asyncio.git/logs/refs/remotes/origin/HEAD | 0 .../logs/refs/remotes/origin/bind_modules | 0 .../asyncio.git/logs/refs/remotes/origin/master | 0 .../logs/refs/remotes/origin/zero_timeout | 0 .../data/asyncio.git/objects/info/packs | 0 ...ack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx | Bin ...ck-7e1b1ace85030071ca314cd565ae038bacc302a4.pack | Bin .../data/asyncio.git/packed-refs | 0 .../data/asyncio.git/refs/remotes/origin/HEAD | 0 .../{bm_dulwich_log.py => bm_dulwich_log/run.py} | 0 .../{bm_fannkuch.py => bm_fannkuch/run.py} | 0 .../_benchmarks/{bm_float.py => bm_float/run.py} | 0 .../_benchmarks/{bm_genshi.py => bm_genshi/run.py} | 0 .../_benchmarks/{bm_go.py => bm_go/run.py} | 0 .../_benchmarks/{bm_hexiom.py => bm_hexiom/run.py} | 0 .../{bm_hg_startup.py => bm_hg_startup/run.py} | 0 .../{ => bm_html5lib}/data/w3_tr_html5.html | 0 .../{bm_html5lib.py => bm_html5lib/run.py} | 0 .../{bm_json_dumps.py => bm_json_dumps/run.py} | 0 .../{bm_json_loads.py => bm_json_loads/run.py} | 0 .../{bm_logging.py => bm_logging/run.py} | 0 .../_benchmarks/{bm_mako.py => bm_mako/run.py} | 0 .../_benchmarks/{bm_mdp.py => bm_mdp/run.py} | 0 .../run.py} | 0 .../_benchmarks/{bm_nbody.py => bm_nbody/run.py} | 0 .../{bm_nqueens.py => bm_nqueens/run.py} | 0 .../{bm_pathlib.py => bm_pathlib/run.py} | 0 .../_benchmarks/{bm_pickle.py => bm_pickle/run.py} | 0 .../{bm_pidigits.py => bm_pidigits/run.py} | 0 .../{ => bm_pyflate}/data/interpreter.tar.bz2 | Bin .../{bm_pyflate.py => bm_pyflate/run.py} | 0 .../_benchmarks/bm_pyston_aiohttp_requirements.txt | 4 ++++ .../bm_pyston_djangocms_requirements.txt | 9 +++++++++ .../bm_pyston_flaskblogging_requirements.txt | 5 +++++ .../bm_pyston_gevent_bench_hu_requirements.txt | 2 ++ .../_benchmarks/bm_pyston_gunicor_requirements.txt | 5 +++++ 
.../_benchmarks/bm_pyston_mypy_requirements.txt | 1 + .../bm_pyston_pycparser_requirements.txt | 1 + .../_benchmarks/bm_pyston_pylint_requirements.txt | 1 + ...bm_pyston_pytorch_alexnet_infer_requirements.txt | 2 ++ .../_benchmarks/bm_pyston_thrift_requirements.txt | 1 + .../run.py} | 0 .../{bm_raytrace.py => bm_raytrace/run.py} | 0 .../run.py} | 0 .../{bm_regex_dna.py => bm_regex_dna/run.py} | 0 .../{bm_regex_effbot.py => bm_regex_effbot/run.py} | 0 .../{bm_regex_v8.py => bm_regex_v8/run.py} | 0 .../{bm_richards.py => bm_richards/run.py} | 0 .../{bm_scimark.py => bm_scimark/run.py} | 0 .../run.py} | 0 .../run.py} | 0 .../run.py} | 0 .../{bm_sqlite_synth.py => bm_sqlite_synth/run.py} | 0 .../_benchmarks/{bm_sympy.py => bm_sympy/run.py} | 0 .../_benchmarks/{ => bm_telco}/data/telco-bench.b | Bin .../_benchmarks/{bm_telco.py => bm_telco/run.py} | 0 .../{bm_tornado_http.py => bm_tornado_http/run.py} | 0 .../run.py} | 0 .../{bm_xml_etree.py => bm_xml_etree/run.py} | 0 pyperformance/run.py | 2 +- 96 files changed, 32 insertions(+), 1 deletion(-) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/README.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/__init__.py.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/context_processors.py.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/exceptions.py.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/mail.py.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/paginator.py.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/signals.py.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/template_loader.py.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/urlresolvers.py.txt (100%) rename pyperformance/_benchmarks/{ => bm_2to3}/data/2to3/xheaders.py.txt (100%) rename pyperformance/_benchmarks/{bm_2to3.py => bm_2to3/run.py} (100%) rename pyperformance/_benchmarks/{bm_chameleon.py => bm_chameleon/run.py} (100%) rename pyperformance/_benchmarks/{bm_chaos.py => bm_chaos/run.py} (100%) rename pyperformance/_benchmarks/{bm_crypto_pyaes.py => bm_crypto_pyaes/run.py} (100%) rename pyperformance/_benchmarks/{bm_deltablue.py => bm_deltablue/run.py} (100%) rename pyperformance/_benchmarks/{bm_django_template.py => bm_django_template/run.py} (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/COMMIT_EDITMSG (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/FETCH_HEAD (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/HEAD (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/ORIG_HEAD (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/config (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/description (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/hooks/applypatch-msg.sample (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/hooks/commit-msg.sample (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/hooks/post-update.sample (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/hooks/pre-applypatch.sample (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/hooks/pre-commit.sample (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/hooks/pre-push.sample (100%) rename pyperformance/_benchmarks/{ => 
bm_dulwich_log}/data/asyncio.git/hooks/pre-rebase.sample (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/hooks/prepare-commit-msg.sample (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/hooks/update.sample (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/index (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/info/exclude (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/info/refs (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/logs/HEAD (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/logs/refs/heads/master (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/logs/refs/remotes/origin/HEAD (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/logs/refs/remotes/origin/bind_modules (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/logs/refs/remotes/origin/master (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/logs/refs/remotes/origin/zero_timeout (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/objects/info/packs (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/packed-refs (100%) rename pyperformance/_benchmarks/{ => bm_dulwich_log}/data/asyncio.git/refs/remotes/origin/HEAD (100%) rename pyperformance/_benchmarks/{bm_dulwich_log.py => bm_dulwich_log/run.py} (100%) rename pyperformance/_benchmarks/{bm_fannkuch.py => bm_fannkuch/run.py} (100%) rename pyperformance/_benchmarks/{bm_float.py => bm_float/run.py} (100%) rename pyperformance/_benchmarks/{bm_genshi.py => bm_genshi/run.py} (100%) rename pyperformance/_benchmarks/{bm_go.py => bm_go/run.py} (100%) rename pyperformance/_benchmarks/{bm_hexiom.py => bm_hexiom/run.py} (100%) rename pyperformance/_benchmarks/{bm_hg_startup.py => bm_hg_startup/run.py} (100%) rename pyperformance/_benchmarks/{ => bm_html5lib}/data/w3_tr_html5.html (100%) rename pyperformance/_benchmarks/{bm_html5lib.py => bm_html5lib/run.py} (100%) rename pyperformance/_benchmarks/{bm_json_dumps.py => bm_json_dumps/run.py} (100%) rename pyperformance/_benchmarks/{bm_json_loads.py => bm_json_loads/run.py} (100%) rename pyperformance/_benchmarks/{bm_logging.py => bm_logging/run.py} (100%) rename pyperformance/_benchmarks/{bm_mako.py => bm_mako/run.py} (100%) rename pyperformance/_benchmarks/{bm_mdp.py => bm_mdp/run.py} (100%) rename pyperformance/_benchmarks/{bm_meteor_contest.py => bm_meteor_contest/run.py} (100%) rename pyperformance/_benchmarks/{bm_nbody.py => bm_nbody/run.py} (100%) rename pyperformance/_benchmarks/{bm_nqueens.py => bm_nqueens/run.py} (100%) rename pyperformance/_benchmarks/{bm_pathlib.py => bm_pathlib/run.py} (100%) rename pyperformance/_benchmarks/{bm_pickle.py => bm_pickle/run.py} (100%) rename pyperformance/_benchmarks/{bm_pidigits.py => bm_pidigits/run.py} (100%) rename pyperformance/_benchmarks/{ => bm_pyflate}/data/interpreter.tar.bz2 (100%) rename pyperformance/_benchmarks/{bm_pyflate.py => bm_pyflate/run.py} (100%) create mode 100644 pyperformance/_benchmarks/bm_pyston_aiohttp_requirements.txt create mode 
100644 pyperformance/_benchmarks/bm_pyston_djangocms_requirements.txt create mode 100644 pyperformance/_benchmarks/bm_pyston_flaskblogging_requirements.txt create mode 100644 pyperformance/_benchmarks/bm_pyston_gevent_bench_hu_requirements.txt create mode 100644 pyperformance/_benchmarks/bm_pyston_gunicor_requirements.txt create mode 100644 pyperformance/_benchmarks/bm_pyston_mypy_requirements.txt create mode 100644 pyperformance/_benchmarks/bm_pyston_pycparser_requirements.txt create mode 100644 pyperformance/_benchmarks/bm_pyston_pylint_requirements.txt create mode 100644 pyperformance/_benchmarks/bm_pyston_pytorch_alexnet_infer_requirements.txt create mode 100644 pyperformance/_benchmarks/bm_pyston_thrift_requirements.txt rename pyperformance/_benchmarks/{bm_python_startup.py => bm_python_startup/run.py} (100%) rename pyperformance/_benchmarks/{bm_raytrace.py => bm_raytrace/run.py} (100%) rename pyperformance/_benchmarks/{bm_regex_compile.py => bm_regex_compile/run.py} (100%) rename pyperformance/_benchmarks/{bm_regex_dna.py => bm_regex_dna/run.py} (100%) rename pyperformance/_benchmarks/{bm_regex_effbot.py => bm_regex_effbot/run.py} (100%) rename pyperformance/_benchmarks/{bm_regex_v8.py => bm_regex_v8/run.py} (100%) rename pyperformance/_benchmarks/{bm_richards.py => bm_richards/run.py} (100%) rename pyperformance/_benchmarks/{bm_scimark.py => bm_scimark/run.py} (100%) rename pyperformance/_benchmarks/{bm_spectral_norm.py => bm_spectral_norm/run.py} (100%) rename pyperformance/_benchmarks/{bm_sqlalchemy_declarative.py => bm_sqlalchemy_declarative/run.py} (100%) rename pyperformance/_benchmarks/{bm_sqlalchemy_imperative.py => bm_sqlalchemy_imperative/run.py} (100%) rename pyperformance/_benchmarks/{bm_sqlite_synth.py => bm_sqlite_synth/run.py} (100%) rename pyperformance/_benchmarks/{bm_sympy.py => bm_sympy/run.py} (100%) rename pyperformance/_benchmarks/{ => bm_telco}/data/telco-bench.b (100%) rename pyperformance/_benchmarks/{bm_telco.py => bm_telco/run.py} (100%) rename pyperformance/_benchmarks/{bm_tornado_http.py => bm_tornado_http/run.py} (100%) rename pyperformance/_benchmarks/{bm_unpack_sequence.py => bm_unpack_sequence/run.py} (100%) rename pyperformance/_benchmarks/{bm_xml_etree.py => bm_xml_etree/run.py} (100%) diff --git a/pyperformance/_benchmarks/data/2to3/README.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/README.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/README.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/README.txt diff --git a/pyperformance/_benchmarks/data/2to3/__init__.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/__init__.py.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/__init__.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/__init__.py.txt diff --git a/pyperformance/_benchmarks/data/2to3/context_processors.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/context_processors.py.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/context_processors.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/context_processors.py.txt diff --git a/pyperformance/_benchmarks/data/2to3/exceptions.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/exceptions.py.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/exceptions.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/exceptions.py.txt diff --git a/pyperformance/_benchmarks/data/2to3/mail.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/mail.py.txt 
similarity index 100% rename from pyperformance/_benchmarks/data/2to3/mail.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/mail.py.txt diff --git a/pyperformance/_benchmarks/data/2to3/paginator.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/paginator.py.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/paginator.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/paginator.py.txt diff --git a/pyperformance/_benchmarks/data/2to3/signals.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/signals.py.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/signals.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/signals.py.txt diff --git a/pyperformance/_benchmarks/data/2to3/template_loader.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/template_loader.py.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/template_loader.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/template_loader.py.txt diff --git a/pyperformance/_benchmarks/data/2to3/urlresolvers.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/urlresolvers.py.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/urlresolvers.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/urlresolvers.py.txt diff --git a/pyperformance/_benchmarks/data/2to3/xheaders.py.txt b/pyperformance/_benchmarks/bm_2to3/data/2to3/xheaders.py.txt similarity index 100% rename from pyperformance/_benchmarks/data/2to3/xheaders.py.txt rename to pyperformance/_benchmarks/bm_2to3/data/2to3/xheaders.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3.py b/pyperformance/_benchmarks/bm_2to3/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_2to3.py rename to pyperformance/_benchmarks/bm_2to3/run.py diff --git a/pyperformance/_benchmarks/bm_chameleon.py b/pyperformance/_benchmarks/bm_chameleon/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_chameleon.py rename to pyperformance/_benchmarks/bm_chameleon/run.py diff --git a/pyperformance/_benchmarks/bm_chaos.py b/pyperformance/_benchmarks/bm_chaos/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_chaos.py rename to pyperformance/_benchmarks/bm_chaos/run.py diff --git a/pyperformance/_benchmarks/bm_crypto_pyaes.py b/pyperformance/_benchmarks/bm_crypto_pyaes/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_crypto_pyaes.py rename to pyperformance/_benchmarks/bm_crypto_pyaes/run.py diff --git a/pyperformance/_benchmarks/bm_deltablue.py b/pyperformance/_benchmarks/bm_deltablue/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_deltablue.py rename to pyperformance/_benchmarks/bm_deltablue/run.py diff --git a/pyperformance/_benchmarks/bm_django_template.py b/pyperformance/_benchmarks/bm_django_template/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_django_template.py rename to pyperformance/_benchmarks/bm_django_template/run.py diff --git a/pyperformance/_benchmarks/data/asyncio.git/COMMIT_EDITMSG b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/COMMIT_EDITMSG rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG diff --git a/pyperformance/_benchmarks/data/asyncio.git/FETCH_HEAD b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD similarity index 100% rename from 
pyperformance/_benchmarks/data/asyncio.git/FETCH_HEAD rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD diff --git a/pyperformance/_benchmarks/data/asyncio.git/HEAD b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/HEAD similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/HEAD rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/HEAD diff --git a/pyperformance/_benchmarks/data/asyncio.git/ORIG_HEAD b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/ORIG_HEAD rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD diff --git a/pyperformance/_benchmarks/data/asyncio.git/config b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/config similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/config rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/config diff --git a/pyperformance/_benchmarks/data/asyncio.git/description b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/description similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/description rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/description diff --git a/pyperformance/_benchmarks/data/asyncio.git/hooks/applypatch-msg.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/applypatch-msg.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample diff --git a/pyperformance/_benchmarks/data/asyncio.git/hooks/commit-msg.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/commit-msg.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample diff --git a/pyperformance/_benchmarks/data/asyncio.git/hooks/post-update.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/post-update.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample diff --git a/pyperformance/_benchmarks/data/asyncio.git/hooks/pre-applypatch.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/pre-applypatch.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample diff --git a/pyperformance/_benchmarks/data/asyncio.git/hooks/pre-commit.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/pre-commit.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample diff --git a/pyperformance/_benchmarks/data/asyncio.git/hooks/pre-push.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/pre-push.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample diff --git 
a/pyperformance/_benchmarks/data/asyncio.git/hooks/pre-rebase.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/pre-rebase.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample diff --git a/pyperformance/_benchmarks/data/asyncio.git/hooks/prepare-commit-msg.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/prepare-commit-msg.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample diff --git a/pyperformance/_benchmarks/data/asyncio.git/hooks/update.sample b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/update.sample similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/hooks/update.sample rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/update.sample diff --git a/pyperformance/_benchmarks/data/asyncio.git/index b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/index similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/index rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/index diff --git a/pyperformance/_benchmarks/data/asyncio.git/info/exclude b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/info/exclude rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude diff --git a/pyperformance/_benchmarks/data/asyncio.git/info/refs b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/info/refs similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/info/refs rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/info/refs diff --git a/pyperformance/_benchmarks/data/asyncio.git/logs/HEAD b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/logs/HEAD rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD diff --git a/pyperformance/_benchmarks/data/asyncio.git/logs/refs/heads/master b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/logs/refs/heads/master rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master diff --git a/pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/HEAD b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/HEAD rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD diff --git a/pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/bind_modules b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/bind_modules rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules diff --git a/pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/master 
b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/master rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master diff --git a/pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/zero_timeout b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/logs/refs/remotes/origin/zero_timeout rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout diff --git a/pyperformance/_benchmarks/data/asyncio.git/objects/info/packs b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/objects/info/packs rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs diff --git a/pyperformance/_benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx diff --git a/pyperformance/_benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack diff --git a/pyperformance/_benchmarks/data/asyncio.git/packed-refs b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/packed-refs rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs diff --git a/pyperformance/_benchmarks/data/asyncio.git/refs/remotes/origin/HEAD b/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD similarity index 100% rename from pyperformance/_benchmarks/data/asyncio.git/refs/remotes/origin/HEAD rename to pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD diff --git a/pyperformance/_benchmarks/bm_dulwich_log.py b/pyperformance/_benchmarks/bm_dulwich_log/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log.py rename to pyperformance/_benchmarks/bm_dulwich_log/run.py diff --git a/pyperformance/_benchmarks/bm_fannkuch.py b/pyperformance/_benchmarks/bm_fannkuch/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_fannkuch.py rename to pyperformance/_benchmarks/bm_fannkuch/run.py diff --git a/pyperformance/_benchmarks/bm_float.py b/pyperformance/_benchmarks/bm_float/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_float.py rename to pyperformance/_benchmarks/bm_float/run.py diff --git a/pyperformance/_benchmarks/bm_genshi.py b/pyperformance/_benchmarks/bm_genshi/run.py similarity 
index 100% rename from pyperformance/_benchmarks/bm_genshi.py rename to pyperformance/_benchmarks/bm_genshi/run.py diff --git a/pyperformance/_benchmarks/bm_go.py b/pyperformance/_benchmarks/bm_go/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_go.py rename to pyperformance/_benchmarks/bm_go/run.py diff --git a/pyperformance/_benchmarks/bm_hexiom.py b/pyperformance/_benchmarks/bm_hexiom/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_hexiom.py rename to pyperformance/_benchmarks/bm_hexiom/run.py diff --git a/pyperformance/_benchmarks/bm_hg_startup.py b/pyperformance/_benchmarks/bm_hg_startup/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_hg_startup.py rename to pyperformance/_benchmarks/bm_hg_startup/run.py diff --git a/pyperformance/_benchmarks/data/w3_tr_html5.html b/pyperformance/_benchmarks/bm_html5lib/data/w3_tr_html5.html similarity index 100% rename from pyperformance/_benchmarks/data/w3_tr_html5.html rename to pyperformance/_benchmarks/bm_html5lib/data/w3_tr_html5.html diff --git a/pyperformance/_benchmarks/bm_html5lib.py b/pyperformance/_benchmarks/bm_html5lib/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_html5lib.py rename to pyperformance/_benchmarks/bm_html5lib/run.py diff --git a/pyperformance/_benchmarks/bm_json_dumps.py b/pyperformance/_benchmarks/bm_json_dumps/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_json_dumps.py rename to pyperformance/_benchmarks/bm_json_dumps/run.py diff --git a/pyperformance/_benchmarks/bm_json_loads.py b/pyperformance/_benchmarks/bm_json_loads/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_json_loads.py rename to pyperformance/_benchmarks/bm_json_loads/run.py diff --git a/pyperformance/_benchmarks/bm_logging.py b/pyperformance/_benchmarks/bm_logging/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_logging.py rename to pyperformance/_benchmarks/bm_logging/run.py diff --git a/pyperformance/_benchmarks/bm_mako.py b/pyperformance/_benchmarks/bm_mako/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_mako.py rename to pyperformance/_benchmarks/bm_mako/run.py diff --git a/pyperformance/_benchmarks/bm_mdp.py b/pyperformance/_benchmarks/bm_mdp/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_mdp.py rename to pyperformance/_benchmarks/bm_mdp/run.py diff --git a/pyperformance/_benchmarks/bm_meteor_contest.py b/pyperformance/_benchmarks/bm_meteor_contest/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_meteor_contest.py rename to pyperformance/_benchmarks/bm_meteor_contest/run.py diff --git a/pyperformance/_benchmarks/bm_nbody.py b/pyperformance/_benchmarks/bm_nbody/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_nbody.py rename to pyperformance/_benchmarks/bm_nbody/run.py diff --git a/pyperformance/_benchmarks/bm_nqueens.py b/pyperformance/_benchmarks/bm_nqueens/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_nqueens.py rename to pyperformance/_benchmarks/bm_nqueens/run.py diff --git a/pyperformance/_benchmarks/bm_pathlib.py b/pyperformance/_benchmarks/bm_pathlib/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_pathlib.py rename to pyperformance/_benchmarks/bm_pathlib/run.py diff --git a/pyperformance/_benchmarks/bm_pickle.py b/pyperformance/_benchmarks/bm_pickle/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_pickle.py rename to 
pyperformance/_benchmarks/bm_pickle/run.py diff --git a/pyperformance/_benchmarks/bm_pidigits.py b/pyperformance/_benchmarks/bm_pidigits/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_pidigits.py rename to pyperformance/_benchmarks/bm_pidigits/run.py diff --git a/pyperformance/_benchmarks/data/interpreter.tar.bz2 b/pyperformance/_benchmarks/bm_pyflate/data/interpreter.tar.bz2 similarity index 100% rename from pyperformance/_benchmarks/data/interpreter.tar.bz2 rename to pyperformance/_benchmarks/bm_pyflate/data/interpreter.tar.bz2 diff --git a/pyperformance/_benchmarks/bm_pyflate.py b/pyperformance/_benchmarks/bm_pyflate/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_pyflate.py rename to pyperformance/_benchmarks/bm_pyflate/run.py diff --git a/pyperformance/_benchmarks/bm_pyston_aiohttp_requirements.txt b/pyperformance/_benchmarks/bm_pyston_aiohttp_requirements.txt new file mode 100644 index 00000000..04470944 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_aiohttp_requirements.txt @@ -0,0 +1,4 @@ +requests +aiohttp +uvloop +django-cms diff --git a/pyperformance/_benchmarks/bm_pyston_djangocms_requirements.txt b/pyperformance/_benchmarks/bm_pyston_djangocms_requirements.txt new file mode 100644 index 00000000..da6f299d --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_djangocms_requirements.txt @@ -0,0 +1,9 @@ +requests +django-cms +djangocms-bootstrap4 +djangocms-installer +djangocms-file +djangocms-googlemap +djangocms-snippet +djangocms-style +djangocms-video diff --git a/pyperformance/_benchmarks/bm_pyston_flaskblogging_requirements.txt b/pyperformance/_benchmarks/bm_pyston_flaskblogging_requirements.txt new file mode 100644 index 00000000..15a99c7e --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_flaskblogging_requirements.txt @@ -0,0 +1,5 @@ +requests +django-cms +Flask +Flask-Blogging +Flask-Login diff --git a/pyperformance/_benchmarks/bm_pyston_gevent_bench_hu_requirements.txt b/pyperformance/_benchmarks/bm_pyston_gevent_bench_hu_requirements.txt new file mode 100644 index 00000000..6cebbc56 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_gevent_bench_hu_requirements.txt @@ -0,0 +1,2 @@ +gevent +greenlet diff --git a/pyperformance/_benchmarks/bm_pyston_gunicor_requirements.txt b/pyperformance/_benchmarks/bm_pyston_gunicor_requirements.txt new file mode 100644 index 00000000..093ad2c7 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_gunicor_requirements.txt @@ -0,0 +1,5 @@ +requests +gunicorn +aiohttp +uvloop +django-cms diff --git a/pyperformance/_benchmarks/bm_pyston_mypy_requirements.txt b/pyperformance/_benchmarks/bm_pyston_mypy_requirements.txt new file mode 100644 index 00000000..f0aa93ac --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_mypy_requirements.txt @@ -0,0 +1 @@ +mypy diff --git a/pyperformance/_benchmarks/bm_pyston_pycparser_requirements.txt b/pyperformance/_benchmarks/bm_pyston_pycparser_requirements.txt new file mode 100644 index 00000000..dc1c9e10 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_pycparser_requirements.txt @@ -0,0 +1 @@ +pycparser diff --git a/pyperformance/_benchmarks/bm_pyston_pylint_requirements.txt b/pyperformance/_benchmarks/bm_pyston_pylint_requirements.txt new file mode 100644 index 00000000..7fb0ea15 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_pylint_requirements.txt @@ -0,0 +1 @@ +pylint diff --git a/pyperformance/_benchmarks/bm_pyston_pytorch_alexnet_infer_requirements.txt 
b/pyperformance/_benchmarks/bm_pyston_pytorch_alexnet_infer_requirements.txt new file mode 100644 index 00000000..a119e2b5 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_pytorch_alexnet_infer_requirements.txt @@ -0,0 +1,2 @@ +Pillow +torch diff --git a/pyperformance/_benchmarks/bm_pyston_thrift_requirements.txt b/pyperformance/_benchmarks/bm_pyston_thrift_requirements.txt new file mode 100644 index 00000000..5e6e05d8 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyston_thrift_requirements.txt @@ -0,0 +1 @@ +thrift diff --git a/pyperformance/_benchmarks/bm_python_startup.py b/pyperformance/_benchmarks/bm_python_startup/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_python_startup.py rename to pyperformance/_benchmarks/bm_python_startup/run.py diff --git a/pyperformance/_benchmarks/bm_raytrace.py b/pyperformance/_benchmarks/bm_raytrace/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_raytrace.py rename to pyperformance/_benchmarks/bm_raytrace/run.py diff --git a/pyperformance/_benchmarks/bm_regex_compile.py b/pyperformance/_benchmarks/bm_regex_compile/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_compile.py rename to pyperformance/_benchmarks/bm_regex_compile/run.py diff --git a/pyperformance/_benchmarks/bm_regex_dna.py b/pyperformance/_benchmarks/bm_regex_dna/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_dna.py rename to pyperformance/_benchmarks/bm_regex_dna/run.py diff --git a/pyperformance/_benchmarks/bm_regex_effbot.py b/pyperformance/_benchmarks/bm_regex_effbot/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_effbot.py rename to pyperformance/_benchmarks/bm_regex_effbot/run.py diff --git a/pyperformance/_benchmarks/bm_regex_v8.py b/pyperformance/_benchmarks/bm_regex_v8/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_v8.py rename to pyperformance/_benchmarks/bm_regex_v8/run.py diff --git a/pyperformance/_benchmarks/bm_richards.py b/pyperformance/_benchmarks/bm_richards/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_richards.py rename to pyperformance/_benchmarks/bm_richards/run.py diff --git a/pyperformance/_benchmarks/bm_scimark.py b/pyperformance/_benchmarks/bm_scimark/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_scimark.py rename to pyperformance/_benchmarks/bm_scimark/run.py diff --git a/pyperformance/_benchmarks/bm_spectral_norm.py b/pyperformance/_benchmarks/bm_spectral_norm/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_spectral_norm.py rename to pyperformance/_benchmarks/bm_spectral_norm/run.py diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_declarative.py b/pyperformance/_benchmarks/bm_sqlalchemy_declarative/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_declarative.py rename to pyperformance/_benchmarks/bm_sqlalchemy_declarative/run.py diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_imperative.py b/pyperformance/_benchmarks/bm_sqlalchemy_imperative/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_imperative.py rename to pyperformance/_benchmarks/bm_sqlalchemy_imperative/run.py diff --git a/pyperformance/_benchmarks/bm_sqlite_synth.py b/pyperformance/_benchmarks/bm_sqlite_synth/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlite_synth.py rename to pyperformance/_benchmarks/bm_sqlite_synth/run.py diff --git 
a/pyperformance/_benchmarks/bm_sympy.py b/pyperformance/_benchmarks/bm_sympy/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_sympy.py rename to pyperformance/_benchmarks/bm_sympy/run.py diff --git a/pyperformance/_benchmarks/data/telco-bench.b b/pyperformance/_benchmarks/bm_telco/data/telco-bench.b similarity index 100% rename from pyperformance/_benchmarks/data/telco-bench.b rename to pyperformance/_benchmarks/bm_telco/data/telco-bench.b diff --git a/pyperformance/_benchmarks/bm_telco.py b/pyperformance/_benchmarks/bm_telco/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_telco.py rename to pyperformance/_benchmarks/bm_telco/run.py diff --git a/pyperformance/_benchmarks/bm_tornado_http.py b/pyperformance/_benchmarks/bm_tornado_http/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_tornado_http.py rename to pyperformance/_benchmarks/bm_tornado_http/run.py diff --git a/pyperformance/_benchmarks/bm_unpack_sequence.py b/pyperformance/_benchmarks/bm_unpack_sequence/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_unpack_sequence.py rename to pyperformance/_benchmarks/bm_unpack_sequence/run.py diff --git a/pyperformance/_benchmarks/bm_xml_etree.py b/pyperformance/_benchmarks/bm_xml_etree/run.py similarity index 100% rename from pyperformance/_benchmarks/bm_xml_etree.py rename to pyperformance/_benchmarks/bm_xml_etree/run.py diff --git a/pyperformance/run.py b/pyperformance/run.py index 295f50b1..5abfb3da 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -65,7 +65,7 @@ def Relative(*path): def run_perf_script(python, options, name, extra_args=[]): - bm_path = Relative("bm_%s.py" % name) + bm_path = Relative("bm_%s" % name, "run.py") cmd = list(python) cmd.append('-u') cmd.append(bm_path) From d79eba5a78f8916687e2a8d720897aedda80b92e Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 23 Jun 2021 11:09:01 -0600 Subject: [PATCH 010/126] Make BenchmarkSpec.metafile as "secondary" attribute. --- pyperformance/benchmark/_spec.py | 9 ++++++++- pyperformance/benchmarks/__init__.py | 3 ++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/pyperformance/benchmark/_spec.py b/pyperformance/benchmark/_spec.py index 1de1d26f..32d3428b 100644 --- a/pyperformance/benchmark/_spec.py +++ b/pyperformance/benchmark/_spec.py @@ -1,7 +1,14 @@ from collections import namedtuple -BenchmarkSpec = namedtuple('BenchmarkSpec', 'name version origin metafile') +class BenchmarkSpec(namedtuple('BenchmarkSpec', 'name version origin')): + + metafile = None + + def __new__(cls, name, version=None, origin=None, metafile=None): + self = super().__new__(cls, name, version, origin) + self.metafile = metafile + return self def parse_benchmark(entry): diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index 0d1d6896..ca59e5c9 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -27,7 +27,8 @@ def resolve(bench): metafile = os.path.join(DEFAULTS_DIR, f'bm_{bench.name}', 'METADATA') - bench = bench._replace(metafile=metafile) + #bench = bench._replace(metafile=metafile) + bench.metafile = metafile return bench with open(filename) as infile: return _manifest.parse_manifest(infile, resolve=resolve) From f30421d9fced159db8499a26c8c1f51048f24d14 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 23 Jun 2021 17:57:42 -0600 Subject: [PATCH 011/126] Fix benchmark selection. 
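The -b/--benchmarks value is now parsed into (op, entry, kind, parsed) tuples: cli.py feeds the raw string through the new _utils.parse_selections() helper, and cli_run.py resolves the results against the manifest with benchmarks.iter_selections(). As a reviewer aid, here is a minimal sketch of the new low-level helpers added in this patch (the benchmark names are only illustrative):

    from pyperformance import _utils

    # Tag patterns are written as "<name>"; plain names pass straight through.
    _utils.parse_tag_pattern('<regex>')     # -> 'regex'
    _utils.parse_name_pattern('2to3')       # -> '2to3'

    # With the default entry parser each item becomes (op, entry, None, entry).
    list(_utils.parse_selections('default,-2to3'))
    # -> [('+', 'default', None, 'default'), ('-', '2to3', None, '2to3')]
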
--- pyperformance/_utils/__init__.py | 6 ++ pyperformance/_utils/_misc.py | 51 +++++++++++ pyperformance/benchmark/__init__.py | 2 +- pyperformance/benchmark/_benchmark.py | 16 +++- pyperformance/benchmark/_spec.py | 26 ++++-- pyperformance/benchmarks/__init__.py | 73 ++++----------- pyperformance/benchmarks/_manifest.py | 104 +++++++++++---------- pyperformance/benchmarks/_parse.py | 53 ----------- pyperformance/benchmarks/_selections.py | 116 ++++++++++++++++++++++++ pyperformance/cli.py | 12 ++- pyperformance/cli_run.py | 56 +++++------- 11 files changed, 313 insertions(+), 202 deletions(-) create mode 100644 pyperformance/_utils/_misc.py delete mode 100644 pyperformance/benchmarks/_parse.py create mode 100644 pyperformance/benchmarks/_selections.py diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py index b6081744..179d1824 100644 --- a/pyperformance/_utils/__init__.py +++ b/pyperformance/_utils/__init__.py @@ -4,6 +4,12 @@ from ._fs import ( temporary_file, ) +from ._misc import ( + check_name, + parse_name_pattern, + parse_tag_pattern, + parse_selections, +) from ._platform import ( MS_WINDOWS, ) diff --git a/pyperformance/_utils/_misc.py b/pyperformance/_utils/_misc.py new file mode 100644 index 00000000..bef414bd --- /dev/null +++ b/pyperformance/_utils/_misc.py @@ -0,0 +1,51 @@ + +def check_name(name, *, loose=False): + if not name or not isinstance(name, str): + raise ValueError(f'bad name {name!r}') + if not loose: + if name.startswith('-'): + raise ValueError(name) + if not name.replace('-', '_').isidentifier(): + raise ValueError(name) + + +def parse_name_pattern(text, *, fail=True): + name = text + # XXX Support globs and/or regexes? (return a callable) + try: + check_name('_' + name) + except Exception: + if fail: + raise # re-raise + return None + return name + + +def parse_tag_pattern(text): + if not text.startswith('<'): + return None + if not text.endswith('>'): + return None + tag = text[1:-1] + # XXX Support globs and/or regexes? 
(return a callable) + check_name(tag) + return tag + + +def parse_selections(selections, parse_entry=None): + if isinstance(selections, str): + selections = selections.split(',') + if parse_entry is None: + parse_entry = (lambda o, e: (o, e, None, e)) + + for entry in selections: + entry = entry.strip() + if not entry: + continue + + op = '+' + if entry.startswith('-'): + op = '-' + entry = entry[1:] + + yield parse_entry(op, entry) diff --git a/pyperformance/benchmark/__init__.py b/pyperformance/benchmark/__init__.py index 0d3bb5ae..d9797bab 100644 --- a/pyperformance/benchmark/__init__.py +++ b/pyperformance/benchmark/__init__.py @@ -1,4 +1,4 @@ # aliases -from ._spec import BenchmarkSpec, parse_benchmark +from ._spec import BenchmarkSpec, parse_benchmark, check_name from ._benchmark import Benchmark diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index e1c8b6b2..053608d1 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -1,14 +1,17 @@ -from ._spec import parse_benchmark +from ._spec import BenchmarkSpec class Benchmark: - def __init__(self, spec, run): - if isinstance(spec, str): - spec = parse_benchmark(spec) + def __init__(self, spec, metafile): + spec, _metafile = BenchmarkSpec.from_raw(spec) + if not metafile: + if not _metafile: + raise ValueError(f'missing metafile for {spec!r}') + metafile = _metafile self.spec = spec - self.run = run + self.metafile = metafile def __repr__(self): return f'{type(self).__name__}(spec={self.spec}, run={self.run})' @@ -32,3 +35,6 @@ def __gt__(self, other): except AttributeError: return NotImplemented return self.spec > other_spec + + def run(self, *args): + return self._func(*args) diff --git a/pyperformance/benchmark/_spec.py b/pyperformance/benchmark/_spec.py index 32d3428b..21b89d37 100644 --- a/pyperformance/benchmark/_spec.py +++ b/pyperformance/benchmark/_spec.py @@ -1,14 +1,10 @@ from collections import namedtuple +from .. import _utils -class BenchmarkSpec(namedtuple('BenchmarkSpec', 'name version origin')): - - metafile = None - def __new__(cls, name, version=None, origin=None, metafile=None): - self = super().__new__(cls, name, version, origin) - self.metafile = metafile - return self +def check_name(name): + _utils.check_name('_' + name) def parse_benchmark(entry): @@ -18,4 +14,18 @@ def parse_benchmark(entry): metafile = None if not f'_{name}'.isidentifier(): raise ValueError(f'unsupported benchmark name in {entry!r}') - return BenchmarkSpec(name, version, origin, metafile) + bench = BenchmarkSpec(name, version, origin) + return bench, metafile + + +class BenchmarkSpec(namedtuple('BenchmarkSpec', 'name version origin')): + __slots__ = () + + @classmethod + def from_raw(cls, raw): + if isinstance(raw, BenchmarkSpec): + return raw, None + elif isinstance(raw, str): + return parse_benchmark(raw) + else: + raise ValueError(f'unsupported raw spec {raw!r}') diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index ca59e5c9..60dbbde2 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -4,8 +4,9 @@ from .. import _benchmarks, benchmark as _benchmark from . 
import _manifest -# an alias (but also used here) -from ._parse import parse_benchmarks +# aliases +from ._manifest import expand_benchmark_groups +from ._selections import parse_selection, iter_selections DEFAULTS_DIR = os.path.join( @@ -19,14 +20,23 @@ def load_manifest(filename, *, resolve=None): filename = DEFAULT_MANIFEST if resolve is None: def resolve(bench): - if not bench.version: - bench = bench._replace(version=__version__) - if not bench.origin: - bench = bench._replace(origin='') + if isinstance(bench, _benchmark.Benchmark): + spec = bench.spec + else: + spec = bench + bench = _benchmark.Benchmark(spec, '') + bench.metafile = None + + if not spec.version: + spec = spec._replace(version=__version__) + if not spec.origin: + spec = spec._replace(origin='') + bench.spec = spec + if not bench.metafile: metafile = os.path.join(DEFAULTS_DIR, f'bm_{bench.name}', - 'METADATA') + 'pyproject.toml') #bench = bench._replace(metafile=metafile) bench.metafile = metafile return bench @@ -37,53 +47,10 @@ def resolve(bench): def iter_benchmarks(manifest): # XXX Use the benchmark's "run" script. funcs, _ = _benchmarks.get_benchmarks() - for spec in manifest.benchmarks: - func = funcs[spec.name] - yield _benchmark.Benchmark(spec, func) + for bench in manifest.benchmarks: + bench._func = funcs[bench.name] + yield bench def get_benchmarks(manifest): return list(iter_benchmarks(manifest)) - - -def get_benchmark_groups(manifest): - return dict(manifest.groups) - - -def expand_benchmark_groups(parsed, groups): - if isinstance(parsed, str): - parsed = _benchmark.parse_benchmark(parsed) - - if not groups: - yield parsed - elif parsed.name not in groups: - yield parsed - else: - benchmarks = groups[parsed.name] - for bench in benchmarks or (): - yield from expand_benchmark_groups(bench, groups) - - -def select_benchmarks(raw, manifest, *, - expand=None, - known=None, - ): - if expand is None: - groups = get_benchmark_groups(manifest) - expand = lambda n: expand_benchmark_groups(n, groups) - if known is None: - known = get_benchmarks(manifest) - benchmarks = {b.spec: b for b in get_benchmarks(manifest)} - - included, excluded = parse_benchmarks(raw, expand=expand, known=known) - if not included: - included = set(expand('default', 'add')) - - selected = set() - for spec in included: - bench = benchmarks[spec] - selected.add(bench) - for spec in excluded: - bench = benchmarks[spec] - selected.remove(bench) - return selected diff --git a/pyperformance/benchmarks/_manifest.py b/pyperformance/benchmarks/_manifest.py index f0337227..33306d57 100644 --- a/pyperformance/benchmarks/_manifest.py +++ b/pyperformance/benchmarks/_manifest.py @@ -1,6 +1,6 @@ from collections import namedtuple -from .. import benchmark as _benchmark +from .. import benchmark as _benchmark, _utils BENCH_COLUMNS = ('name', 'version', 'origin', 'metafile') @@ -26,9 +26,33 @@ def parse_manifest(text, *, resolve=None): elif section.startswith('group '): _, _, group = section.partition(' ') groups[group] = _parse_group(group, seclines, benchmarks) + _check_groups(groups) + # XXX Update tags for each benchmark with member groups. 
return BenchmarksManifest(benchmarks, groups) +def expand_benchmark_groups(bench, groups): + if isinstance(bench, str): + spec, metafile = _benchmark.parse_benchmark(bench) + if metafile: + bench = _benchmark.Benchmark(spec, metafile) + else: + bench = spec + elif isinstance(bench, _benchmark.Benchmark): + spec = bench.spec + else: + spec = bench + + if not groups: + yield bench + elif bench.name not in groups: + yield bench + else: + benchmarks = groups[bench.name] + for bench in benchmarks or (): + yield from expand_benchmark_groups(bench, groups) + + def _iter_sections(lines): lines = (line.split('#')[0].strip() for line in lines) @@ -63,16 +87,18 @@ def _parse_benchmarks(lines, resolve): benchmarks = [] for line in lines: try: - name, version, origin, metafile = line.split('\t') + name, version, origin, metafile = (None if l == '-' else l + for l in line.split('\t')) except ValueError: raise ValueError(f'bad benchmark line {line!r}') - if not version or version == '-': - version = None - if not origin or origin == '-': - origin = None - if not metafile or metafile == '-': - metafile = None - bench = _benchmark.BenchmarkSpec(name, version, origin, metafile) + spec = _benchmark.BenchmarkSpec(name or None, + version or None, + origin or None, + ) + if metafile: + bench = _benchmark.Benchmark(spec, metafile) + else: + bench = spec if resolve is not None: bench = resolve(bench) benchmarks.append(bench) @@ -80,49 +106,29 @@ def _parse_benchmarks(lines, resolve): def _parse_group(name, lines, benchmarks): - benchmarks = set(benchmarks) byname = {b.name: b for b in benchmarks} + if name in byname: + raise ValueError(f'a group and a benchmark have the same name ({name})') + group = [] + seen = set() for line in lines: - bench = _benchmark.parse_benchmark(line) - if bench not in benchmarks: - try: - bench = byname[bench.name] - except KeyError: - raise ValueError(f'unknown benchmark {bench.name!r} ({name})') - group.append(bench) + benchname = line + _benchmark.check_name(benchname) + if benchname in seen: + continue + if benchname in byname: + group.append(byname[benchname]) + else: + # It may be a group. We check later. + group.append(benchname) return group -#def render_manifest(manifest): -# if isinstance(manifest, str): -# raise NotImplementedError -# manifest = manifest.splitlines() -# yield BENCH_HEADER -# for row in manifest: -# if isinstance(row, str): -# row = _parse_manifest_row(row) -# if isinstance(row, str): -# yield row -# continue -# line _render_manifest_row(row) -# -# raise NotImplementedError -# -# -#def parse_group_manifest(text): -# ... -# -# -#def render_group_manifest(group, benchmarks): -# # (manifest file, bm name) -# ... 
-# -# -#def parse_bench_from_manifest(line): -# raise NotImplementedError -# -# -#def render_bench_for_manifest(benchmark, columns): -# raise NotImplementedError -# name, origin, version, metafile = info +def _check_groups(groups): + for group, benchmarks in groups.items(): + for bench in benchmarks: + if not isinstance(bench, str): + continue + elif bench not in groups: + raise ValueError(f'unknown benchmark {name!r} (in group {group!r})') diff --git a/pyperformance/benchmarks/_parse.py b/pyperformance/benchmarks/_parse.py deleted file mode 100644 index e23b27a9..00000000 --- a/pyperformance/benchmarks/_parse.py +++ /dev/null @@ -1,53 +0,0 @@ -from ..benchmark import parse_benchmark - - -def parse_benchmarks(raw, *, expand=None, known=None): - included = set() - excluded = set() - for op, _, parsed in iter_parsed(raw, expand=expand, known=known): - if op == 'add': - included.add(parsed) - elif op == 'remove': - excluded.add(parsed) - else: - raise NotImplementedError(op) - return included, excluded - - -def iter_parsed(raw, *, expand=None, known=None): - if expand is None: - def expand(parsed, _op): - yield parsed - check_known = _resolve_check_known(known) - - if isinstance(raw, str): - raw = raw.split(',') - entries = (e.strip() for e in raw) - - for entry in entries: - if not entry: - continue - - op = 'add' - if entry.startswith('-'): - op = 'remove' - entry = entry[1:] - - parsed = parse_benchmark(entry) - for expanded in expand(parsed, op): - if check_known(expanded, entry): - yield op, entry, expanded - - -def _resolve_check_known(known): - if callable(known): - return known - elif not known: - return (lambda p: True) - else: - known = set(known) - def check_known(parsed, raw): - if parsed not in known: - raise ValueError(f'unknown benchmark {parsed!r} ({raw})') - return True - return check_known diff --git a/pyperformance/benchmarks/_selections.py b/pyperformance/benchmarks/_selections.py new file mode 100644 index 00000000..0dcdb6bf --- /dev/null +++ b/pyperformance/benchmarks/_selections.py @@ -0,0 +1,116 @@ +from .. import _benchmarks +from .._utils import check_name, parse_name_pattern, parse_tag_pattern +from ..benchmark import parse_benchmark, Benchmark +from ._manifest import expand_benchmark_groups + + +def parse_selection(selection, *, op=None): + # "selection" is one of the following: + # * a benchmark string + # * a benchmark name + # * a benchmark pattern + # * a tag + # * a tag pattern + parsed = parse_benchmark(selection) + spec, metafile = parsed if parsed else (None, None) + if parsed and spec.version: + kind = 'benchmark' + spec, metafile = parsed + if metafile: + parsed = Benchmark(spec, metafile) + else: + parsed = spec + elif parsed and (spec.origin or metafile): + raise NotImplementedError(selection) + else: + parsed = parse_tag_pattern(selection) + if parsed: + kind = 'tag' + else: + kind = 'name' + parsed = parse_name_pattern(selection, fail=True) +# parsed = parse_name_pattern(selection, fail=False) + if not parsed: + raise ValueError(f'unsupported selection {selection!r}') + return op or '+', selection, kind, parsed + + +def iter_selections(manifest, selections, *, unique=True): + byname = {b.name: b for b in manifest.benchmarks} + + # Compose the expanded include/exclude lists. 
+ seen = set() + included = [] + excluded = set() + for op, _, kind, parsed in selections: + matches = _match_selection(manifest, kind, parsed, byname) + if op == '+': + for bench in matches: + if bench not in seen or not unique: + included.append(bench) + seen.add(bench) + elif op == '-': + for bench in matches: + excluded.add(bench) + else: + raise NotImplementedError(op) + if not included: + included = list(_match_selection(manifest, 'tag', 'default', byname)) + + funcs, _ = _benchmarks.get_benchmarks() + for bench in included: + if bench not in excluded: + if isinstance(bench, Benchmark): + # XXX Use the benchmark's "run" script. + bench._func = funcs[bench.name] + yield bench + + +def _match_selection(manifest, kind, parsed, byname): + if kind == 'benchmark': + bench = parsed + # XXX Match bench.metafile too? + spec = getattr(bench, 'spec', bench) + # For now we only support selection by name. + # XXX Support selection by version? + # XXX Support selection by origin? + if spec.version or spec.origin: + raise NotImplementedError(spec) + if spec.name in byname: + yield bench + else: + # No match! The caller can handle this as they like. + yield str(bench) + elif kind == 'tag': + # XXX Instad, walk all benchmarks to check the "tags" field? + groups = [] + if callable(parsed): + match_tag = parsed + for group in manifest.groups: + if match_tag(group): + groups.append(group) + elif parsed in manifest.groups: + groups.append(parsed) + else: + raise ValueError(f'unsupported selection {parsed!r}') + for group in groups: + yield from expand_benchmark_groups(group, manifest.groups) + elif kind == 'name': + if callable(parsed): + match_bench = parsed + for bench in manifest.benchmarks: + if match_bench(bench.name): + yield bench + else: + name = parsed + if name in byname: + yield byname[name] + # We also check the groups, for backward compatibility. + elif name in manifest.groups: + yield from _match_selection(manifest, 'tag', name, byname) + else: + check_name(name) + # No match! The caller can handle this as they like. + yield name + else: + raise NotImplementedError(kind) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 42396ec6..75eb216a 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -2,6 +2,7 @@ import os.path import sys +from pyperformance import _utils, benchmarks as _benchmarks from pyperformance.venv import exec_in_virtualenv, cmd_venv @@ -12,8 +13,8 @@ def comma_separated(values): def filter_opts(cmd): cmd.add_argument("--manifest", help="benchmark manifest file to use") - cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default="default", - type=(lambda b: b.lower()), + + cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default='default', help=("Comma-separated list of benchmarks to run. Can" " contain both positive and negative arguments:" " --benchmarks=run_this,also_this,-not_this. If" @@ -158,6 +159,13 @@ def parse_args(): options = parser.parse_args() + # Process benchmark selections. 
+ if hasattr(options, 'benchmarks'): + entries = options.benchmarks.lower() + parse_entry = (lambda o, s: _benchmarks.parse_selection(s, op=o)) + parsed = _utils.parse_selections(entries, parse_entry) + options.bm_selections = list(parsed) + if options.action == 'run' and options.debug_single_value: options.fast = True diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index ea9e6389..c9bcb58d 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -5,41 +5,32 @@ import pyperf import pyperformance -from pyperformance.benchmark import parse_benchmark from pyperformance.benchmarks import ( load_manifest, iter_benchmarks, get_benchmarks, - get_benchmark_groups, - expand_benchmark_groups, - select_benchmarks, + iter_selections, ) from pyperformance.compare import display_benchmark_suite from pyperformance.run import run_benchmarks -def _select_benchmarks(raw, manifest): - groups = get_benchmark_groups(manifest) - def expand(parsed, op): - if isinstance(parsed, str): - parsed = parse_benchmark(parsed) - if parsed.name in groups and op == 'remove': +def _select_benchmarks(selections, manifest): + groups = manifest.groups + for op, _, kind, parsed in selections: + if callable(parsed): + continue + name = parsed.name if kind == 'benchmark' else parsed + if name in manifest.groups and op == '-': raise ValueError(f'negative groups not supported: -{parsed.name}') - yield from expand_benchmark_groups(parsed, groups) - - known = set(b.spec for b in iter_benchmarks(manifest)) - def check_known(parsed, raw): - if parsed not in known: - logging.warning(f"no benchmark named {parsed.name!r}") - return False - return True - return select_benchmarks( - raw, - manifest, - expand=expand, - known=check_known, - ) + selected = [] + for bench in iter_selections(manifest, selections): + if isinstance(bench, str): + logging.warning(f"no benchmark named {bench!r}") + continue + selected.append(bench) + return selected def cmd_run(parser, options): @@ -61,7 +52,7 @@ def cmd_run(parser, options): sys.exit(1) manifest = load_manifest(options.manifest) - should_run = _select_benchmarks(options.benchmarks, manifest) + should_run = _select_benchmarks(options.bm_selections, manifest) cmd_prefix = [executable] suite, errors = run_benchmarks(should_run, cmd_prefix, options) @@ -86,10 +77,14 @@ def cmd_run(parser, options): def cmd_list(options): manifest = load_manifest(options.manifest) - if 'all' in options.benchmarks.split(','): - selected = manifest.benchmarks + for op, _, kind, parsed in options.bm_selections: + if op == '+': + name = parsed.name if kind == 'benchmark' else parsed + if name == 'all': + selected = manifest.benchmarks + break else: - selected = _select_benchmarks(options.benchmarks, manifest) + selected = _select_benchmarks(options.bm_selections, manifest) print("%r benchmarks:" % options.benchmarks) for bench in sorted(selected): @@ -100,11 +95,10 @@ def cmd_list(options): def cmd_list_groups(options): manifest = load_manifest(options.manifest) - bench_groups = get_benchmark_groups(manifest) - bench_groups['all'] = list(manifest.benchmarks) + manifest.groups['all'] = list(manifest.benchmarks) all_benchmarks = set(manifest.benchmarks) - for group, specs in sorted(bench_groups.items()): + for group, specs in sorted(manifest.groups.items()): known = set(specs) & all_benchmarks if not known: # skip empty groups From 043836bff70179852f4f1c6230bb5700650d9927 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 23 Jun 2021 20:16:33 -0600 Subject: [PATCH 012/126] Fix the default 
benchmarks selection. --- pyperformance/benchmark/_spec.py | 6 +++++- pyperformance/benchmarks/_selections.py | 2 +- pyperformance/cli.py | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pyperformance/benchmark/_spec.py b/pyperformance/benchmark/_spec.py index 21b89d37..d85de8e1 100644 --- a/pyperformance/benchmark/_spec.py +++ b/pyperformance/benchmark/_spec.py @@ -7,13 +7,17 @@ def check_name(name): _utils.check_name('_' + name) -def parse_benchmark(entry): +def parse_benchmark(entry, *, fail=True): name = entry version = None origin = None metafile = None + if not f'_{name}'.isidentifier(): + if not fail: + return None raise ValueError(f'unsupported benchmark name in {entry!r}') + bench = BenchmarkSpec(name, version, origin) return bench, metafile diff --git a/pyperformance/benchmarks/_selections.py b/pyperformance/benchmarks/_selections.py index 0dcdb6bf..bb6c75e4 100644 --- a/pyperformance/benchmarks/_selections.py +++ b/pyperformance/benchmarks/_selections.py @@ -11,7 +11,7 @@ def parse_selection(selection, *, op=None): # * a benchmark pattern # * a tag # * a tag pattern - parsed = parse_benchmark(selection) + parsed = parse_benchmark(selection, fail=False) spec, metafile = parsed if parsed else (None, None) if parsed and spec.version: kind = 'benchmark' diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 75eb216a..37b32cde 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -14,7 +14,7 @@ def comma_separated(values): def filter_opts(cmd): cmd.add_argument("--manifest", help="benchmark manifest file to use") - cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default='default', + cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default='', help=("Comma-separated list of benchmarks to run. Can" " contain both positive and negative arguments:" " --benchmarks=run_this,also_this,-not_this. If" From 6ba9603cde4ddd0c2ae51b7a15441f3e60282455 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 23 Jun 2021 22:01:09 -0600 Subject: [PATCH 013/126] Fix the run script filename. 
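Each benchmark's entry point is renamed from run.py to run_benchmark.py. A sketch of the path lookup this rename implies (hypothetical: it mirrors the earlier Relative()-based lookup in run_perf_script(), which is adjusted separately and is not part of this rename-only patch):

    # hypothetical follow-up inside run_perf_script(), pointing at the
    # renamed per-benchmark entry point
    bm_path = Relative("bm_%s" % name, "run_benchmark.py")
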
--- pyperformance/_benchmarks/bm_2to3/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_chameleon/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_chaos/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_crypto_pyaes/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_deltablue/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_django_template/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_dulwich_log/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_fannkuch/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_float/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_genshi/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_go/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_hexiom/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_hg_startup/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_html5lib/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_json_dumps/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_json_loads/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_logging/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_mako/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_mdp/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_meteor_contest/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_nbody/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_nqueens/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_pathlib/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_pickle/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_pidigits/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_pyflate/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_python_startup/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_raytrace/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_regex_compile/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_regex_dna/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_regex_effbot/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_regex_v8/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_richards/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_scimark/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_spectral_norm/{run.py => run_benchmark.py} | 0 .../bm_sqlalchemy_declarative/{run.py => run_benchmark.py} | 0 .../bm_sqlalchemy_imperative/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_sqlite_synth/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_sympy/{run.py => run_benchmark.py} | 0 pyperformance/_benchmarks/bm_telco/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_tornado_http/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_unpack_sequence/{run.py => run_benchmark.py} | 0 .../_benchmarks/bm_xml_etree/{run.py => run_benchmark.py} | 0 43 files changed, 0 insertions(+), 0 deletions(-) rename pyperformance/_benchmarks/bm_2to3/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_chameleon/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_chaos/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_crypto_pyaes/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_deltablue/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_django_template/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_dulwich_log/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_fannkuch/{run.py => run_benchmark.py} (100%) rename 
pyperformance/_benchmarks/bm_float/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_genshi/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_go/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_hexiom/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_hg_startup/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_html5lib/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_json_dumps/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_json_loads/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_logging/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_mako/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_mdp/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_meteor_contest/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_nbody/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_nqueens/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_pathlib/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_pickle/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_pidigits/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_pyflate/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_python_startup/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_raytrace/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_regex_compile/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_regex_dna/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_regex_effbot/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_regex_v8/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_richards/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_scimark/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_spectral_norm/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_sqlalchemy_declarative/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_sqlalchemy_imperative/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_sqlite_synth/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_sympy/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_telco/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_tornado_http/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_unpack_sequence/{run.py => run_benchmark.py} (100%) rename pyperformance/_benchmarks/bm_xml_etree/{run.py => run_benchmark.py} (100%) diff --git a/pyperformance/_benchmarks/bm_2to3/run.py b/pyperformance/_benchmarks/bm_2to3/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/run.py rename to pyperformance/_benchmarks/bm_2to3/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_chameleon/run.py b/pyperformance/_benchmarks/bm_chameleon/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_chameleon/run.py rename to pyperformance/_benchmarks/bm_chameleon/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_chaos/run.py b/pyperformance/_benchmarks/bm_chaos/run_benchmark.py similarity index 100% rename from 
pyperformance/_benchmarks/bm_chaos/run.py rename to pyperformance/_benchmarks/bm_chaos/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_crypto_pyaes/run.py b/pyperformance/_benchmarks/bm_crypto_pyaes/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_crypto_pyaes/run.py rename to pyperformance/_benchmarks/bm_crypto_pyaes/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_deltablue/run.py b/pyperformance/_benchmarks/bm_deltablue/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_deltablue/run.py rename to pyperformance/_benchmarks/bm_deltablue/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_django_template/run.py b/pyperformance/_benchmarks/bm_django_template/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_django_template/run.py rename to pyperformance/_benchmarks/bm_django_template/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_dulwich_log/run.py b/pyperformance/_benchmarks/bm_dulwich_log/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/run.py rename to pyperformance/_benchmarks/bm_dulwich_log/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_fannkuch/run.py b/pyperformance/_benchmarks/bm_fannkuch/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_fannkuch/run.py rename to pyperformance/_benchmarks/bm_fannkuch/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_float/run.py b/pyperformance/_benchmarks/bm_float/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_float/run.py rename to pyperformance/_benchmarks/bm_float/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_genshi/run.py b/pyperformance/_benchmarks/bm_genshi/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_genshi/run.py rename to pyperformance/_benchmarks/bm_genshi/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_go/run.py b/pyperformance/_benchmarks/bm_go/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_go/run.py rename to pyperformance/_benchmarks/bm_go/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_hexiom/run.py b/pyperformance/_benchmarks/bm_hexiom/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_hexiom/run.py rename to pyperformance/_benchmarks/bm_hexiom/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_hg_startup/run.py b/pyperformance/_benchmarks/bm_hg_startup/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_hg_startup/run.py rename to pyperformance/_benchmarks/bm_hg_startup/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_html5lib/run.py b/pyperformance/_benchmarks/bm_html5lib/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_html5lib/run.py rename to pyperformance/_benchmarks/bm_html5lib/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_json_dumps/run.py b/pyperformance/_benchmarks/bm_json_dumps/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_json_dumps/run.py rename to pyperformance/_benchmarks/bm_json_dumps/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_json_loads/run.py b/pyperformance/_benchmarks/bm_json_loads/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_json_loads/run.py rename to pyperformance/_benchmarks/bm_json_loads/run_benchmark.py diff --git 
a/pyperformance/_benchmarks/bm_logging/run.py b/pyperformance/_benchmarks/bm_logging/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_logging/run.py rename to pyperformance/_benchmarks/bm_logging/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_mako/run.py b/pyperformance/_benchmarks/bm_mako/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_mako/run.py rename to pyperformance/_benchmarks/bm_mako/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_mdp/run.py b/pyperformance/_benchmarks/bm_mdp/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_mdp/run.py rename to pyperformance/_benchmarks/bm_mdp/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_meteor_contest/run.py b/pyperformance/_benchmarks/bm_meteor_contest/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_meteor_contest/run.py rename to pyperformance/_benchmarks/bm_meteor_contest/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_nbody/run.py b/pyperformance/_benchmarks/bm_nbody/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_nbody/run.py rename to pyperformance/_benchmarks/bm_nbody/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_nqueens/run.py b/pyperformance/_benchmarks/bm_nqueens/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_nqueens/run.py rename to pyperformance/_benchmarks/bm_nqueens/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_pathlib/run.py b/pyperformance/_benchmarks/bm_pathlib/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_pathlib/run.py rename to pyperformance/_benchmarks/bm_pathlib/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_pickle/run.py b/pyperformance/_benchmarks/bm_pickle/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/run.py rename to pyperformance/_benchmarks/bm_pickle/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_pidigits/run.py b/pyperformance/_benchmarks/bm_pidigits/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_pidigits/run.py rename to pyperformance/_benchmarks/bm_pidigits/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_pyflate/run.py b/pyperformance/_benchmarks/bm_pyflate/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_pyflate/run.py rename to pyperformance/_benchmarks/bm_pyflate/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_python_startup/run.py b/pyperformance/_benchmarks/bm_python_startup/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_python_startup/run.py rename to pyperformance/_benchmarks/bm_python_startup/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_raytrace/run.py b/pyperformance/_benchmarks/bm_raytrace/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_raytrace/run.py rename to pyperformance/_benchmarks/bm_raytrace/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_regex_compile/run.py b/pyperformance/_benchmarks/bm_regex_compile/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_compile/run.py rename to pyperformance/_benchmarks/bm_regex_compile/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_regex_dna/run.py b/pyperformance/_benchmarks/bm_regex_dna/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_dna/run.py 
rename to pyperformance/_benchmarks/bm_regex_dna/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_regex_effbot/run.py b/pyperformance/_benchmarks/bm_regex_effbot/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_effbot/run.py rename to pyperformance/_benchmarks/bm_regex_effbot/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_regex_v8/run.py b/pyperformance/_benchmarks/bm_regex_v8/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_v8/run.py rename to pyperformance/_benchmarks/bm_regex_v8/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_richards/run.py b/pyperformance/_benchmarks/bm_richards/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_richards/run.py rename to pyperformance/_benchmarks/bm_richards/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_scimark/run.py b/pyperformance/_benchmarks/bm_scimark/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_scimark/run.py rename to pyperformance/_benchmarks/bm_scimark/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_spectral_norm/run.py b/pyperformance/_benchmarks/bm_spectral_norm/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_spectral_norm/run.py rename to pyperformance/_benchmarks/bm_spectral_norm/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_declarative/run.py b/pyperformance/_benchmarks/bm_sqlalchemy_declarative/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_declarative/run.py rename to pyperformance/_benchmarks/bm_sqlalchemy_declarative/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_imperative/run.py b/pyperformance/_benchmarks/bm_sqlalchemy_imperative/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_imperative/run.py rename to pyperformance/_benchmarks/bm_sqlalchemy_imperative/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_sqlite_synth/run.py b/pyperformance/_benchmarks/bm_sqlite_synth/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlite_synth/run.py rename to pyperformance/_benchmarks/bm_sqlite_synth/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_sympy/run.py b/pyperformance/_benchmarks/bm_sympy/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_sympy/run.py rename to pyperformance/_benchmarks/bm_sympy/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_telco/run.py b/pyperformance/_benchmarks/bm_telco/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_telco/run.py rename to pyperformance/_benchmarks/bm_telco/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_tornado_http/run.py b/pyperformance/_benchmarks/bm_tornado_http/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_tornado_http/run.py rename to pyperformance/_benchmarks/bm_tornado_http/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_unpack_sequence/run.py b/pyperformance/_benchmarks/bm_unpack_sequence/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_unpack_sequence/run.py rename to pyperformance/_benchmarks/bm_unpack_sequence/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_xml_etree/run.py b/pyperformance/_benchmarks/bm_xml_etree/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_xml_etree/run.py rename 
to pyperformance/_benchmarks/bm_xml_etree/run_benchmark.py From 0f15e8eb413a0882066e3d51cd5aa746828a2d36 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 23 Jun 2021 22:33:46 -0600 Subject: [PATCH 014/126] Run benchmarks from the metadata instead of hard-coded. --- pyperformance/_benchmarks/MANIFEST | 100 +++--- pyperformance/_benchmarks/__init__.py | 303 ----------------- pyperformance/_benchmarks/base.toml | 24 ++ .../_benchmarks/bm_2to3/pyproject.toml | 12 + .../_benchmarks/bm_chameleon/pyproject.toml | 12 + .../_benchmarks/bm_chaos/pyproject.toml | 12 + .../bm_crypto_pyaes/pyproject.toml | 12 + .../_benchmarks/bm_deltablue/pyproject.toml | 12 + .../bm_django_template/pyproject.toml | 12 + .../_benchmarks/bm_dulwich_log/pyproject.toml | 12 + .../_benchmarks/bm_fannkuch/pyproject.toml | 12 + .../_benchmarks/bm_float/pyproject.toml | 12 + .../_benchmarks/bm_genshi/pyproject.toml | 12 + .../_benchmarks/bm_go/pyproject.toml | 12 + .../_benchmarks/bm_hexiom/pyproject.toml | 12 + .../_benchmarks/bm_hg_startup/pyproject.toml | 12 + .../_benchmarks/bm_html5lib/pyproject.toml | 12 + .../_benchmarks/bm_json_dumps/pyproject.toml | 12 + .../_benchmarks/bm_json_loads/pyproject.toml | 12 + .../_benchmarks/bm_logging/pyproject.toml | 12 + .../_benchmarks/bm_mako/pyproject.toml | 12 + .../_benchmarks/bm_mdp/pyproject.toml | 12 + .../bm_meteor_contest/pyproject.toml | 12 + .../_benchmarks/bm_nbody/pyproject.toml | 12 + .../_benchmarks/bm_nqueens/pyproject.toml | 12 + .../_benchmarks/bm_pathlib/pyproject.toml | 12 + .../_benchmarks/bm_pickle/bm_pickle_dict.toml | 13 + .../_benchmarks/bm_pickle/bm_pickle_list.toml | 13 + .../bm_pickle/bm_pickle_pure_python.toml | 13 + .../_benchmarks/bm_pickle/bm_unpickle.toml | 13 + .../bm_pickle/bm_unpickle_list.toml | 13 + .../bm_pickle/bm_unpickle_pure_python.toml | 13 + .../_benchmarks/bm_pickle/pyproject.toml | 13 + .../_benchmarks/bm_pidigits/pyproject.toml | 12 + .../_benchmarks/bm_pyflate/pyproject.toml | 12 + .../bm_python_startup_no_site.toml | 13 + .../bm_python_startup/pyproject.toml | 12 + .../_benchmarks/bm_raytrace/pyproject.toml | 12 + .../bm_regex_compile/bm_regex_effbot.py | 1 + .../bm_regex_compile/bm_regex_v8.py | 1 + .../bm_regex_compile/pyproject.toml | 12 + .../_benchmarks/bm_regex_dna/pyproject.toml | 12 + .../bm_regex_effbot/pyproject.toml | 12 + .../_benchmarks/bm_regex_v8/pyproject.toml | 12 + .../_benchmarks/bm_richards/pyproject.toml | 12 + .../_benchmarks/bm_scimark/pyproject.toml | 12 + .../bm_spectral_norm/pyproject.toml | 12 + .../bm_sqlalchemy_declarative/pyproject.toml | 12 + .../bm_sqlalchemy_imperative/pyproject.toml | 12 + .../bm_sqlite_synth/pyproject.toml | 12 + .../_benchmarks/bm_sympy/pyproject.toml | 12 + .../_benchmarks/bm_telco/pyproject.toml | 12 + .../bm_tornado_http/pyproject.toml | 12 + .../bm_unpack_sequence/pyproject.toml | 12 + .../_benchmarks/bm_xml_etree/pyproject.toml | 12 + pyperformance/_utils/__init__.py | 10 + pyperformance/_utils/_fs.py | 15 + pyperformance/_utils/_platform.py | 39 +++ pyperformance/_utils/_pyproject_toml.py | 311 ++++++++++++++++++ pyperformance/_utils/platform.py | 5 + pyperformance/benchmark/_benchmark.py | 76 ++++- pyperformance/benchmark/_metadata.py | 235 +++++++++++++ pyperformance/benchmark/_run.py | 26 ++ pyperformance/benchmarks/__init__.py | 17 +- pyperformance/benchmarks/_manifest.py | 30 +- pyperformance/benchmarks/_selections.py | 5 - pyperformance/cli_run.py | 2 - pyperformance/requirements.in | 32 ++ pyperformance/requirements.txt | 279 +++++++++++++++- pyperformance/run.py | 
85 +---- setup.py | 2 +- 71 files changed, 1733 insertions(+), 473 deletions(-) delete mode 100644 pyperformance/_benchmarks/__init__.py create mode 100644 pyperformance/_benchmarks/base.toml create mode 100644 pyperformance/_benchmarks/bm_2to3/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_chameleon/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_chaos/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_deltablue/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_django_template/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_fannkuch/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_float/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_genshi/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_go/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_hexiom/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_hg_startup/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_html5lib/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_json_dumps/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_json_loads/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_logging/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_mako/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_mdp/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_meteor_contest/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_nbody/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_nqueens/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_pathlib/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_pickle/bm_pickle_dict.toml create mode 100644 pyperformance/_benchmarks/bm_pickle/bm_pickle_list.toml create mode 100644 pyperformance/_benchmarks/bm_pickle/bm_pickle_pure_python.toml create mode 100644 pyperformance/_benchmarks/bm_pickle/bm_unpickle.toml create mode 100644 pyperformance/_benchmarks/bm_pickle/bm_unpickle_list.toml create mode 100644 pyperformance/_benchmarks/bm_pickle/bm_unpickle_pure_python.toml create mode 100644 pyperformance/_benchmarks/bm_pickle/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_pidigits/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_pyflate/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_python_startup/bm_python_startup_no_site.toml create mode 100644 pyperformance/_benchmarks/bm_python_startup/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_raytrace/pyproject.toml create mode 120000 pyperformance/_benchmarks/bm_regex_compile/bm_regex_effbot.py create mode 120000 pyperformance/_benchmarks/bm_regex_compile/bm_regex_v8.py create mode 100644 pyperformance/_benchmarks/bm_regex_compile/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_regex_dna/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_regex_effbot/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_regex_v8/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_richards/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_scimark/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_spectral_norm/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml create mode 100644 
pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_sqlite_synth/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_sympy/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_telco/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_tornado_http/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_unpack_sequence/pyproject.toml create mode 100644 pyperformance/_benchmarks/bm_xml_etree/pyproject.toml create mode 100644 pyperformance/_utils/_pyproject_toml.py create mode 100644 pyperformance/_utils/platform.py create mode 100644 pyperformance/benchmark/_metadata.py create mode 100644 pyperformance/benchmark/_run.py diff --git a/pyperformance/_benchmarks/MANIFEST b/pyperformance/_benchmarks/MANIFEST index 4c76dc1e..afcdd121 100644 --- a/pyperformance/_benchmarks/MANIFEST +++ b/pyperformance/_benchmarks/MANIFEST @@ -1,58 +1,58 @@ [benchmarks] name version origin metafile -2to3 - - - -chameleon - - - -chaos - - - -crypto_pyaes - - - -deltablue - - - -django_template - - - -dulwich_log - - - -fannkuch - - - -float - - - -genshi - - - -go - - - -hexiom - - - +2to3 - - +chameleon - - +chaos - - +crypto_pyaes - - +deltablue - - +django_template - - +dulwich_log - - +fannkuch - - +float - - +genshi - - +go - - +hexiom - - # FIXME: this benchmark fails with: # Unable to get the program 'hg' from the virtual environment -#hg_startup - - - -#html5lib - - - -json_dumps - - - -json_loads - - - -logging - - - -mako - - - -mdp - - - -meteor_contest - - - -nbody - - - -nqueens - - - -pathlib - - - -pickle - - - -pickle_dict - - - -pickle_list - - - -pickle_pure_python - - - -pidigits - - - -pyflate - - - -python_startup - - - -python_startup_no_site - - - -raytrace - - - -regex_compile - - - -regex_dna - - - -regex_effbot - - - -regex_v8 - - - -richards - - - -scimark - - - -spectral_norm - - - -sqlalchemy_declarative - - - -sqlalchemy_imperative - - - -sqlite_synth - - - -sympy - - - -telco - - - -tornado_http - - - -unpack_sequence - - - -unpickle - - - -unpickle_list - - - -unpickle_pure_python - - - -xml_etree - - - +#hg_startup - - +#html5lib - - +json_dumps - - +json_loads - - +logging - - +mako - - +mdp - - +meteor_contest - - +nbody - - +nqueens - - +pathlib - - +pickle - - +pickle_dict - - +pickle_list - - +pickle_pure_python - - +pidigits - - +pyflate - - +python_startup - - +python_startup_no_site - - +raytrace - - +regex_compile - - +regex_dna - - +regex_effbot - - +regex_v8 - - +richards - - +scimark - - +spectral_norm - - +sqlalchemy_declarative - - +sqlalchemy_imperative - - +sqlite_synth - - +sympy - - +telco - - +tornado_http - - +unpack_sequence - - +unpickle - - +unpickle_list - - +unpickle_pure_python - - +xml_etree - - [group default] diff --git a/pyperformance/_benchmarks/__init__.py b/pyperformance/_benchmarks/__init__.py deleted file mode 100644 index 4671d89d..00000000 --- a/pyperformance/_benchmarks/__init__.py +++ /dev/null @@ -1,303 +0,0 @@ -from ..run import run_perf_script - - -# Benchmark groups. The "default" group is what's run if no -b option is -# specified. 
-DEFAULT_GROUP = [ - '2to3', - 'chameleon', - 'chaos', - 'crypto_pyaes', - 'deltablue', - 'django_template', - 'dulwich_log', - 'fannkuch', - 'float', - - # FIXME: this benchmark fails with: - # TypeError: code() argument 15 must be bytes, not tuple - # 'genshi', - - 'go', - 'hexiom', - - # FIXME: this benchmark fails with: - # Unable to get the program 'hg' from the virtual environment - # 'hg_startup', - - 'html5lib', - 'json_dumps', - 'json_loads', - 'logging', - 'mako', - 'meteor_contest', - 'nbody', - 'nqueens', - 'pathlib', - 'pickle', - 'pickle_dict', - 'pickle_list', - 'pickle_pure_python', - 'pidigits', - 'pyflate', - 'python_startup', - 'python_startup_no_site', - 'raytrace', - 'regex_compile', - 'regex_dna', - 'regex_effbot', - 'regex_v8', - 'richards', - 'scimark', - 'spectral_norm', - 'sqlalchemy_declarative', - 'sqlalchemy_imperative', - 'sqlite_synth', - 'sympy', - 'telco', - 'tornado_http', - 'unpack_sequence', - 'unpickle', - 'unpickle_list', - 'unpickle_pure_python', - 'xml_etree', -] - -BENCH_GROUPS = { - # get_benchmarks() creates an "all" group which includes every benchmark - # pyperformance knows about. - "default": DEFAULT_GROUP, - "startup": ["normal_startup", "startup_nosite", - "hg_startup"], - "regex": ["regex_v8", "regex_effbot", "regex_compile", - "regex_dna"], - "serialize": ["pickle_pure_python", "unpickle_pure_python", # Not for Python 3 - "pickle", "unpickle", - "xml_etree", - "json_dumps", "json_loads"], - "apps": ["2to3", "chameleon", "html5lib", "tornado_http"], - "math": ["float", "nbody", "pidigits"], - "template": ["django_template", "mako"], -} - - -def BM_2to3(python, options): - return run_perf_script(python, options, "2to3") - - -# def BM_hg_startup(python, options): -# return run_perf_script(python, options, "hg_startup") - - -def BM_Chameleon(python, options): - return run_perf_script(python, options, "chameleon") - - -def BM_Tornado_Http(python, options): - return run_perf_script(python, options, "tornado_http") - - -def BM_Django_Template(python, options): - return run_perf_script(python, options, "django_template") - - -def BM_Float(python, options): - return run_perf_script(python, options, "float") - - -def BM_mako(python, options): - return run_perf_script(python, options, "mako") - - -def BM_pathlib(python, options): - return run_perf_script(python, options, "pathlib") - - -def pickle_benchmark(python, options, *extra_args): - return run_perf_script(python, options, "pickle", - extra_args=list(extra_args)) - - -def BM_pickle(python, options): - return pickle_benchmark(python, options, "pickle") - - -def BM_unpickle(python, options): - return pickle_benchmark(python, options, "unpickle") - - -def BM_pickle_list(python, options): - return pickle_benchmark(python, options, "pickle_list") - - -def BM_pickle_dict(python, options): - return pickle_benchmark(python, options, "pickle_dict") - - -def BM_unpickle_list(python, options): - return pickle_benchmark(python, options, "unpickle_list") - - -def BM_pickle_pure_python(python, options): - return pickle_benchmark(python, options, "--pure-python", "pickle") - - -def BM_unpickle_pure_python(python, options): - return pickle_benchmark(python, options, "--pure-python", "unpickle") - - -def BM_xml_etree(python, options): - return run_perf_script(python, options, "xml_etree") - - -def BM_json_loads(python, options): - return run_perf_script(python, options, "json_loads") - - -def BM_json_dumps(python, options): - return run_perf_script(python, options, "json_dumps") - - -def BM_NQueens(python, 
options): - return run_perf_script(python, options, "nqueens") - - -def BM_Chaos(python, options): - return run_perf_script(python, options, "chaos") - - -def BM_Fannkuch(python, options): - return run_perf_script(python, options, "fannkuch") - - -def BM_Go(python, options): - return run_perf_script(python, options, "go") - - -def BM_Meteor_Contest(python, options): - return run_perf_script(python, options, "meteor_contest") - - -def BM_Spectral_Norm(python, options): - return run_perf_script(python, options, "spectral_norm") - - -def BM_Telco(python, options): - return run_perf_script(python, options, "telco") - - -def BM_hexiom(python, options): - return run_perf_script(python, options, "hexiom") - - -def BM_raytrace(python, options): - return run_perf_script(python, options, "raytrace") - - -def BM_logging(python, options): - return run_perf_script(python, options, "logging") - - -def BM_python_startup(python, options): - return run_perf_script(python, options, "python_startup") - - -def BM_python_startup_no_site(python, options): - return run_perf_script(python, options, "python_startup", - extra_args=["--no-site"]) - - -def BM_regex_v8(python, options): - return run_perf_script(python, options, "regex_v8") - - -def BM_regex_effbot(python, options): - return run_perf_script(python, options, "regex_effbot") - - -def BM_regex_compile(python, options): - return run_perf_script(python, options, "regex_compile") - - -def BM_regex_dna(python, options): - return run_perf_script(python, options, "regex_dna") - - -def BM_unpack_sequence(python, options): - return run_perf_script(python, options, "unpack_sequence") - - -def BM_nbody(python, options): - return run_perf_script(python, options, "nbody") - - -# def BM_html5lib(python, options): -# return run_perf_script(python, options, "html5lib") - - -def BM_richards(python, options): - return run_perf_script(python, options, "richards") - - -def BM_pidigits(python, options): - return run_perf_script(python, options, "pidigits") - - -def BM_crypto_pyaes(python, options): - return run_perf_script(python, options, "crypto_pyaes") - - -def BM_sympy(python, options): - return run_perf_script(python, options, "sympy") - - -def BM_deltablue(python, options): - return run_perf_script(python, options, "deltablue") - - -def BM_scimark(python, options): - return run_perf_script(python, options, "scimark") - - -def BM_dulwich_log(python, options): - return run_perf_script(python, options, "dulwich_log") - - -def BM_pyflate(python, options): - return run_perf_script(python, options, "pyflate") - - -def BM_sqlite_synth(python, options): - return run_perf_script(python, options, "sqlite_synth") - - -# def BM_genshi(python, options): -# return run_perf_script(python, options, "genshi") - - -def BM_sqlalchemy_declarative(python, options): - return run_perf_script(python, options, "sqlalchemy_declarative") - - -def BM_sqlalchemy_imperative(python, options): - return run_perf_script(python, options, "sqlalchemy_imperative") - - -def BM_mdp(python, options): - return run_perf_script(python, options, "mdp") - - -# End benchmarks, begin main entry point support. 
- - -def get_benchmarks(): - bench_funcs = dict((name[3:].lower(), func) - for name, func in globals().items() - if name.startswith("BM_")) - - bench_groups = BENCH_GROUPS.copy() - - # create the 'all' group - bench_groups["all"] = sorted(bench_funcs) - - return (bench_funcs, bench_groups) diff --git a/pyperformance/_benchmarks/base.toml b/pyperformance/_benchmarks/base.toml new file mode 100644 index 00000000..37b53c10 --- /dev/null +++ b/pyperformance/_benchmarks/base.toml @@ -0,0 +1,24 @@ +[project] +#description = "a pyperformance benchmark" +#readme = "README.rst" +#requires-python = ">=3.8" +#license = {file = "LICENSE.txt"} + +dependencies = [ + "pyperf", +] + +urls = {repository = "https://github.com/python/pyperformance"} + +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +metabase = "" +#metafile = "" +#tags = [] +#prescript = "" +#runscript = "" +#extra_opts = "" diff --git a/pyperformance/_benchmarks/bm_2to3/pyproject.toml b/pyperformance/_benchmarks/bm_2to3/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_2to3/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_chameleon/pyproject.toml b/pyperformance/_benchmarks/bm_chameleon/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_chameleon/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_chaos/pyproject.toml b/pyperformance/_benchmarks/bm_chaos/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_chaos/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml b/pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_deltablue/pyproject.toml b/pyperformance/_benchmarks/bm_deltablue/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_deltablue/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_django_template/pyproject.toml b/pyperformance/_benchmarks/bm_django_template/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_django_template/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. 
+dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml b/pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_fannkuch/pyproject.toml b/pyperformance/_benchmarks/bm_fannkuch/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_fannkuch/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_float/pyproject.toml b/pyperformance/_benchmarks/bm_float/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_float/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_genshi/pyproject.toml b/pyperformance/_benchmarks/bm_genshi/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_genshi/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_go/pyproject.toml b/pyperformance/_benchmarks/bm_go/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_go/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_hexiom/pyproject.toml b/pyperformance/_benchmarks/bm_hexiom/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_hexiom/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_hg_startup/pyproject.toml b/pyperformance/_benchmarks/bm_hg_startup/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_hg_startup/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_html5lib/pyproject.toml b/pyperformance/_benchmarks/bm_html5lib/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_html5lib/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. 
+dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_json_dumps/pyproject.toml b/pyperformance/_benchmarks/bm_json_dumps/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_json_dumps/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_json_loads/pyproject.toml b/pyperformance/_benchmarks/bm_json_loads/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_json_loads/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_logging/pyproject.toml b/pyperformance/_benchmarks/bm_logging/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_logging/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_mako/pyproject.toml b/pyperformance/_benchmarks/bm_mako/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_mako/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_mdp/pyproject.toml b/pyperformance/_benchmarks/bm_mdp/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_mdp/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_meteor_contest/pyproject.toml b/pyperformance/_benchmarks/bm_meteor_contest/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_meteor_contest/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_nbody/pyproject.toml b/pyperformance/_benchmarks/bm_nbody/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_nbody/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_nqueens/pyproject.toml b/pyperformance/_benchmarks/bm_nqueens/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_nqueens/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. 
+dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_pathlib/pyproject.toml b/pyperformance/_benchmarks/bm_pathlib/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pathlib/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_pickle/bm_pickle_dict.toml b/pyperformance/_benchmarks/bm_pickle/bm_pickle_dict.toml new file mode 100644 index 00000000..dbc4f2dd --- /dev/null +++ b/pyperformance/_benchmarks/bm_pickle/bm_pickle_dict.toml @@ -0,0 +1,13 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +# "metabase" is set automatically +extra_opts = ["pickle_dict"] diff --git a/pyperformance/_benchmarks/bm_pickle/bm_pickle_list.toml b/pyperformance/_benchmarks/bm_pickle/bm_pickle_list.toml new file mode 100644 index 00000000..04bc23d5 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pickle/bm_pickle_list.toml @@ -0,0 +1,13 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +# "metabase" is set automatically +extra_opts = ["pickle_list"] diff --git a/pyperformance/_benchmarks/bm_pickle/bm_pickle_pure_python.toml b/pyperformance/_benchmarks/bm_pickle/bm_pickle_pure_python.toml new file mode 100644 index 00000000..a36e4c27 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pickle/bm_pickle_pure_python.toml @@ -0,0 +1,13 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +# "metabase" is set automatically +extra_opts = ["--pure-python", "pickle"] diff --git a/pyperformance/_benchmarks/bm_pickle/bm_unpickle.toml b/pyperformance/_benchmarks/bm_pickle/bm_unpickle.toml new file mode 100644 index 00000000..fde38133 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pickle/bm_unpickle.toml @@ -0,0 +1,13 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +# "metabase" is set automatically +extra_opts = ["unpickle"] diff --git a/pyperformance/_benchmarks/bm_pickle/bm_unpickle_list.toml b/pyperformance/_benchmarks/bm_pickle/bm_unpickle_list.toml new file mode 100644 index 00000000..074cd744 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pickle/bm_unpickle_list.toml @@ -0,0 +1,13 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +# "metabase" is set automatically +extra_opts = ["unpickle_list"] diff --git a/pyperformance/_benchmarks/bm_pickle/bm_unpickle_pure_python.toml b/pyperformance/_benchmarks/bm_pickle/bm_unpickle_pure_python.toml new file mode 100644 index 00000000..d364229e --- /dev/null +++ b/pyperformance/_benchmarks/bm_pickle/bm_unpickle_pure_python.toml @@ -0,0 +1,13 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. 
+# "metabase" is set automatically +extra_opts = ["--pure-python", "unpickle"] diff --git a/pyperformance/_benchmarks/bm_pickle/pyproject.toml b/pyperformance/_benchmarks/bm_pickle/pyproject.toml new file mode 100644 index 00000000..1106ff4f --- /dev/null +++ b/pyperformance/_benchmarks/bm_pickle/pyproject.toml @@ -0,0 +1,13 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." +extra_opts = ["pickle"] diff --git a/pyperformance/_benchmarks/bm_pidigits/pyproject.toml b/pyperformance/_benchmarks/bm_pidigits/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pidigits/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_pyflate/pyproject.toml b/pyperformance/_benchmarks/bm_pyflate/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_pyflate/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_python_startup/bm_python_startup_no_site.toml b/pyperformance/_benchmarks/bm_python_startup/bm_python_startup_no_site.toml new file mode 100644 index 00000000..65049d40 --- /dev/null +++ b/pyperformance/_benchmarks/bm_python_startup/bm_python_startup_no_site.toml @@ -0,0 +1,13 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +# "metabase" is set automatically +extra_opts = ["--no-site"] diff --git a/pyperformance/_benchmarks/bm_python_startup/pyproject.toml b/pyperformance/_benchmarks/bm_python_startup/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_python_startup/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_raytrace/pyproject.toml b/pyperformance/_benchmarks/bm_raytrace/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_raytrace/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." 
diff --git a/pyperformance/_benchmarks/bm_regex_compile/bm_regex_effbot.py b/pyperformance/_benchmarks/bm_regex_compile/bm_regex_effbot.py new file mode 120000 index 00000000..99624945 --- /dev/null +++ b/pyperformance/_benchmarks/bm_regex_compile/bm_regex_effbot.py @@ -0,0 +1 @@ +../bm_regex_effbot/run_benchmark.py \ No newline at end of file diff --git a/pyperformance/_benchmarks/bm_regex_compile/bm_regex_v8.py b/pyperformance/_benchmarks/bm_regex_compile/bm_regex_v8.py new file mode 120000 index 00000000..fe5f5259 --- /dev/null +++ b/pyperformance/_benchmarks/bm_regex_compile/bm_regex_v8.py @@ -0,0 +1 @@ +../bm_regex_v8/run_benchmark.py \ No newline at end of file diff --git a/pyperformance/_benchmarks/bm_regex_compile/pyproject.toml b/pyperformance/_benchmarks/bm_regex_compile/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_regex_compile/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_regex_dna/pyproject.toml b/pyperformance/_benchmarks/bm_regex_dna/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_regex_dna/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_regex_effbot/pyproject.toml b/pyperformance/_benchmarks/bm_regex_effbot/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_regex_effbot/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_regex_v8/pyproject.toml b/pyperformance/_benchmarks/bm_regex_v8/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_regex_v8/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_richards/pyproject.toml b/pyperformance/_benchmarks/bm_richards/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_richards/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_scimark/pyproject.toml b/pyperformance/_benchmarks/bm_scimark/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_scimark/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." 
diff --git a/pyperformance/_benchmarks/bm_spectral_norm/pyproject.toml b/pyperformance/_benchmarks/bm_spectral_norm/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_spectral_norm/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml b/pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml b/pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_sqlite_synth/pyproject.toml b/pyperformance/_benchmarks/bm_sqlite_synth/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_sqlite_synth/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_sympy/pyproject.toml b/pyperformance/_benchmarks/bm_sympy/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_sympy/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_telco/pyproject.toml b/pyperformance/_benchmarks/bm_telco/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_telco/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_tornado_http/pyproject.toml b/pyperformance/_benchmarks/bm_tornado_http/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_tornado_http/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." 
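Each of these per-benchmark pyproject.toml files is deliberately minimal: "name" and "version" are declared dynamic, and everything else is inherited from the shared _benchmarks/base.toml through the tool.pyperformance "metabase" pointer (the inheritance itself is resolved later, in pyperformance/benchmark/_metadata.py). As a rough sketch of how one of these files is read with the helper introduced by this patch series, assuming the source-tree layout above and that the "toml" and "packaging" dependencies are installed (the path and the fallback name below are illustrative only, not part of the patch):

    # Illustrative sketch, not part of the patch.
    from pyperformance._utils import load_pyproject_toml

    data, metafile = load_pyproject_toml(
        'pyperformance/_benchmarks/bm_nbody/pyproject.toml',
        name='nbody',          # fallback name, mirroring how _metadata.py calls the helper
        requirefiles=False,    # skip existence checks on referenced files
    )
    print(data['tool']['pyperformance']['metabase'])   # -> ".."
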
diff --git a/pyperformance/_benchmarks/bm_unpack_sequence/pyproject.toml b/pyperformance/_benchmarks/bm_unpack_sequence/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_unpack_sequence/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_benchmarks/bm_xml_etree/pyproject.toml b/pyperformance/_benchmarks/bm_xml_etree/pyproject.toml new file mode 100644 index 00000000..bf447649 --- /dev/null +++ b/pyperformance/_benchmarks/bm_xml_etree/pyproject.toml @@ -0,0 +1,12 @@ +[project] +#name = "bm_" + +# XXX This should be inherited from metabase. +dynamic = [ + "name", + "version", +] + +[tool.pyperformance] +# "name" is set automatically. +metabase = ".." diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py index 179d1824..5dff3256 100644 --- a/pyperformance/_utils/__init__.py +++ b/pyperformance/_utils/__init__.py @@ -3,6 +3,8 @@ # aliases from ._fs import ( temporary_file, + check_file, + check_dir, ) from ._misc import ( check_name, @@ -12,4 +14,12 @@ ) from ._platform import ( MS_WINDOWS, + run_command, +) +from ._pyproject_toml import ( + parse_person, + parse_classifier, + parse_entry_point, + parse_pyproject_toml, + load_pyproject_toml, ) diff --git a/pyperformance/_utils/_fs.py b/pyperformance/_utils/_fs.py index 3ef507f8..11bf19bf 100644 --- a/pyperformance/_utils/_fs.py +++ b/pyperformance/_utils/_fs.py @@ -1,6 +1,7 @@ import contextlib import errno import os +import os.path import tempfile @@ -15,3 +16,17 @@ def temporary_file(): except OSError as exc: if exc.errno != errno.ENOENT: raise + + +def check_file(filename): + if not os.path.isabs(filename): + raise ValueError(f'expected absolute path, got {filename!r}') + if not os.path.isfile(filename): + raise ValueError(f'file missing ({filename})') + + +def check_dir(dirname): + if not os.path.isabs(dirname): + raise ValueError(f'expected absolute path, got {dirname!r}') + if not os.path.isdir(dirname): + raise ValueError(f'directory missing ({dirname})') diff --git a/pyperformance/_utils/_platform.py b/pyperformance/_utils/_platform.py index e1b4390c..2c803012 100644 --- a/pyperformance/_utils/_platform.py +++ b/pyperformance/_utils/_platform.py @@ -1,5 +1,44 @@ +import logging +import subprocess import sys MS_WINDOWS = (sys.platform == 'win32') + +def run_command(command, hide_stderr=True): + if hide_stderr: + kw = {'stderr': subprocess.PIPE} + else: + kw = {} + + logging.info("Running `%s`", + " ".join(list(map(str, command)))) + + # Explicitly flush standard streams, required if streams are buffered + # (not TTY) to write lines in the expected order + sys.stdout.flush() + sys.stderr.flush() + + proc = subprocess.Popen(command, + universal_newlines=True, + **kw) + try: + stderr = proc.communicate()[1] + except: # noqa + if proc.stderr: + proc.stderr.close() + try: + proc.kill() + except OSError: + # process already exited + pass + proc.wait() + raise + + if proc.returncode != 0: + if hide_stderr: + sys.stderr.flush() + sys.stderr.write(stderr) + sys.stderr.flush() + raise RuntimeError("Benchmark died") diff --git a/pyperformance/_utils/_pyproject_toml.py b/pyperformance/_utils/_pyproject_toml.py new file mode 100644 index 00000000..7c8edbc0 --- /dev/null +++ b/pyperformance/_utils/_pyproject_toml.py @@ -0,0 +1,311 @@ +import os.path +import re +import 
urllib.parse
+
+import packaging.requirements
+import packaging.specifiers
+import packaging.utils
+import packaging.version
+import toml
+
+from ._misc import check_name
+
+
+NAME_RE = re.compile('^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$', re.IGNORECASE)
+
+
+def parse_person(text):
+    # XXX
+    return text
+
+
+def parse_classifier(text):
+    # XXX
+    return text
+
+
+def parse_entry_point(text):
+    # See:
+    # * https://packaging.python.org/specifications/entry-points/#data-model
+    # * https://www.python.org/dev/peps/pep-0517/#source-trees
+    module, sep, qualname = text.partition(':')
+    if all(p.isidentifier() for p in module.split('.')):
+        if not sep or all(p.isidentifier() for p in qualname.split('.')):
+            return module, qualname
+
+    raise ValueError(f'invalid entry point {text!r}')
+
+
+def _check_relfile(relname, rootdir, kind):
+    if os.path.isabs(relname):
+        raise ValueError(f'{relname!r} is absolute, expected relative')
+    actual = os.path.join(rootdir, relname)
+    if kind == 'dir':
+        if not os.path.isdir(actual):
+            raise ValueError(f'directory {actual!r} does not exist')
+    elif kind == 'file':
+        if not os.path.isfile(actual):
+            raise ValueError(f'file {actual!r} does not exist')
+    elif kind == 'any':
+        if not os.path.exists(actual):
+            raise ValueError(f'{actual!r} does not exist')
+    elif kind:
+        raise NotImplementedError(kind)
+
+
+def _check_file_or_text(table, rootdir, requirefiles, extra=None):
+    unsupported = set(table) - set(['file', 'text']) - set(extra or ())
+    if unsupported:
+        raise ValueError(f'unsupported license data {table!r}')
+
+    if 'file' in table:
+        if 'text' in table:
+            raise ValueError(f'"file" and "text" are mutually exclusive')
+        kind = 'file' if requirefiles else None
+        _check_relfile(table['file'], rootdir, kind)
+    else:
+        text = table['text']
+        # XXX Validate it?
+
+
+def load_pyproject_toml(filename, *, name=None, tools=None, requirefiles=True):
+    if os.path.isdir(filename):
+        rootdir = filename
+        filename = os.path.join(rootdir, 'pyproject.toml')
+    else:
+        rootdir = os.path.dirname(filename)
+
+    with open(filename) as infile:
+        text = infile.read()
+    data = parse_pyproject_toml(text, rootdir,
+                                tools=tools,
+                                requirefiles=requirefiles,
+                                )
+    return data, filename
+
+
+def parse_pyproject_toml(text, rootdir, name=None, *,
+                         tools=None,
+                         requirefiles=True,
+                         ):
+    data = toml.loads(text)
+    unused = list(data)
+
+    for section, normalize in SECTIONS.items():
+        try:
+            secdata = data[section]
+        except KeyError:
+            data[section] = None
+        else:
+            data[section] = normalize(secdata,
+                                      name=name,
+                                      tools=tools,
+                                      rootdir=rootdir,
+                                      requirefiles=requirefiles,
+                                      )
+            unused.remove(section)
+
+    if unused:
+        raise ValueError(f'unsupported sections ({", ".join(sorted(unused))})')
+
+    return data
+
+
+def _normalize_project(data, rootdir, name, requirefiles, **_ignored):
+    # See PEP 621.
+    unused = set(data)
+
+    ##########
+    # First handle the required fields.
+
+    name = data.get('name', name)
+    if name:
+        if not NAME_RE.match(name):
+            raise ValueError(f'invalid name {name!r}')
+        name = packaging.utils.canonicalize_name(name)
+        data['name'] = name
+        unused.remove('name')
+    else:
+        if 'name' not in data.get('dynamic', []):
+            raise ValueError('missing required "name" field')
+
+    try:
+        version = data['version']
+    except KeyError:
+        if 'version' not in data.get('dynamic', []):
+            raise ValueError('missing required "version" field')
+    else:
+        # This also validates it.
+        version = packaging.utils.canonicalize_version(version)
+        data['version'] = version
+        unused.remove('version')
+
+    ##########
+    # Now we handle the optional fields.
+
+    # We leave "description" as-is.
+
+    key = 'readme'
+    if key in data:
+        readme = data[key]
+        if isinstance(readme, str):
+            readme = data[key] = {'file': readme}
+        # XXX Check the suffix.
+        # XXX Handle 'content-type'.
+        # XXX Handle "charset" parameter.
+        _check_file_or_text(data[key], rootdir, requirefiles,
+                            ['content-type', 'charset'])
+        unused.remove(key)
+
+    key = 'requires-python'
+    if key in data:
+        # We keep it as a string.
+        data[key] = str(packaging.specifiers.SpecifierSet(data[key]))
+        unused.remove(key)
+
+    key = 'license'
+    if key in data:
+        _check_file_or_text(data[key], rootdir, requirefiles)
+        unused.remove(key)
+
+    key = 'keywords'
+    if key in data:
+        for keyword in data[key]:
+            # XXX Is this the right check?
+            check_name(keyword, loose=True)
+        unused.remove(key)
+
+    key = 'authors'
+    if key in data:
+        for person in data[key]:
+            # We only make sure it is valid.
+            parse_person(person)
+        unused.remove(key)
+
+    key = 'maintainers'
+    if key in data:
+        for person in data[key]:
+            # We only make sure it is valid.
+            parse_person(person)
+        unused.remove(key)
+
+    key = 'classifiers'
+    if key in data:
+        for classifier in data[key]:
+            # We only make sure it is valid.
+            parse_classifier(classifier)
+        unused.remove(key)
+
+    key = 'dependencies'
+    if key in data:
+        for dep in data[key]:
+            # We only make sure it is valid.
+            packaging.requirements.Requirement(dep)
+        unused.remove(key)
+
+    key = 'optional-dependencies'
+    if key in data:
+        # XXX
+        unused.remove(key)
+
+    key = 'urls'
+    if key in data:
+        for name, url in data[key].items():
+            # XXX Is there a stricter check?
+            check_name(name, loose=True)
+            # We only make sure it is valid.
+            urllib.parse.urlparse(url)
+        unused.remove(key)
+
+    key = 'scripts'
+    if key in data:
+        for name, value in data[key].items():
+            # XXX Is there a stricter check?
+            check_name(name, loose=True)
+            # We only make sure it is valid.
+            parse_entry_point(value)
+        unused.remove(key)
+
+    key = 'gui-scripts'
+    if key in data:
+        for name, value in data[key].items():
+            # XXX Is there a stricter check?
+            check_name(name, loose=True)
+            # We only make sure it is valid.
+            parse_entry_point(value)
+        unused.remove(key)
+
+    key = 'entry-points'
+    if key in data:
+        for groupname, group in data[key].items():
+            # XXX Is there a stricter check?
+            check_name(groupname, loose=True)
+            for epname, value in group.items():
+                # XXX Is there a stricter check?
+                check_name(epname, loose=True)
+                # We only make sure it is valid.
+                parse_entry_point(value)
+        unused.remove(key)
+
+    key = 'dynamic'
+    if key in data:
+        for field in data[key]:
+            check_name(field, loose=True)
+            # XXX Fail if it isn't one of the supported fields.
+        unused.remove(key)
+
+    return data
+
+
+def _normalize_build_system(data, rootdir, requirefiles, **_ignored):
+    # See PEP 518 and 517.
+    unused = set(data)
+
+    key = 'requires'
+    if key in data:
+        reqs = data[key]
+        for i, raw in enumerate(reqs):
+            # We only make sure it is valid.
+            packaging.requirements.Requirement(raw)
+        unused.remove(key)
+    else:
+        raise ValueError('missing "requires" field')
+
+    key = 'build-backend'
+    if key in data:
+        # We only make sure it is valid.
+ parse_entry_point(data[key]) + unused.remove(key) + + key = 'backend-path' + if key in data: + if 'build-backend' not in data: + raise ValueError('missing "build-backend" field') + kind = 'dir' if requirefiles else None + for dirname in data[key]: + _check_relfile(dirname, rootdir, kind=kind) + unused.remove(key) + + if unused: + raise ValueError(f'unsupported keys ({", ".join(sorted(unused))})') + + return data + + +def _normalize_tool(data, tools, rootdir, **_ignored): + # See PEP 518. + tools = tools or {} + for name, tooldata in list(data.items()): + if name in tools: + normalize = tools[name] + data[name] = normalize(name, tooldata, rootdir=rootdir) + if data[name] is None: + del data[name] + return data + + +SECTIONS = { + 'project': _normalize_project, + 'build-system': _normalize_build_system, + 'tool': _normalize_tool, +} diff --git a/pyperformance/_utils/platform.py b/pyperformance/_utils/platform.py new file mode 100644 index 00000000..e1b4390c --- /dev/null +++ b/pyperformance/_utils/platform.py @@ -0,0 +1,5 @@ +import sys + + +MS_WINDOWS = (sys.platform == 'win32') + diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index 053608d1..3357514b 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -1,8 +1,12 @@ from ._spec import BenchmarkSpec +from ._metadata import load_metadata +from ._run import run_perf_script class Benchmark: + _metadata = None + def __init__(self, spec, metafile): spec, _metafile = BenchmarkSpec.from_raw(spec) if not metafile: @@ -16,9 +20,6 @@ def __init__(self, spec, metafile): def __repr__(self): return f'{type(self).__name__}(spec={self.spec}, run={self.run})' - def __getattr__(self, name): - return getattr(self.spec, name) - def __hash__(self): return hash(self.spec) @@ -36,5 +37,70 @@ def __gt__(self, other): return NotImplemented return self.spec > other_spec - def run(self, *args): - return self._func(*args) + # __getattr__() gets weird when AttributeError comes out of + # properties so we spell out all the aliased attributes. 
+ + @property + def name(self): + return self.spec.name + + @property + def version(self): + return self.spec.version + + @property + def origin(self): + return self.spec.origin + + def _init_metadata(self): + if self._metadata is not None: + raise NotImplementedError + + def _get_metadata_value(self, key, default): + try: + return self._metadata[key] + except TypeError: + if self._metadata is None: + defaults = { + 'name': self.name, + 'version': self.version, + } + self._metadata, _ = load_metadata(self.metafile, defaults) + except KeyError: + pass + return self._metadata.setdefault(key, default) + + @property + def tags(self): + return self._get_metadata_value('tags', ()) + + @property + def datadir(self): + return self._get_metadata_value('datadir', None) + + @property + def prescript(self): + return self._get_metadata_value('prescript', None) + + @property + def runscript(self): + return self._get_metadata_value('runscript', None) + + @property + def extra_opts(self): + return self._get_metadata_value('extra_opts', ()) + + # Other metadata keys: + # * base + # * python + # * dependencies + # * requirements + + def run(self, python, pyperf_opts=None, *, verbose=False): + return run_perf_script( + python, + self.runscript, + extra_opts=self.extra_opts, + pyperf_opts=pyperf_opts, + verbose=verbose, + ) diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py new file mode 100644 index 00000000..97ee4057 --- /dev/null +++ b/pyperformance/benchmark/_metadata.py @@ -0,0 +1,235 @@ +import os.path + +from .. import _utils +from ._spec import BenchmarkSpec + + +METADATA = 'pyproject.toml' +DEPENDENCIES = 'requirements.in' +REQUIREMENTS = 'requirements.txt' +DATA = 'data' +PREP = 'prep_benchmark.py' +RUN = 'run_benchmark.py' + +PEP_621_FIELDS = { + 'name': None, + 'version': None, + 'requires-python': 'python', + 'dependencies': None, + #'optional-dependencies': '', + #'urls': '', +} +TOOL_FIELDS = { + #'metabase': None, + 'metafile': None, + 'name': None, + 'tags': None, + 'datadir': None, + 'prescript': None, + 'runscript': None, + 'extra_opts': None, +} + + +#class BenchmarkMetadata: +# spec +# base +# metafile +# tags +# python +# dependencies # (from requirements.in) +# requirements # (from lock file or requirements.txt) +# datadir +# prescript +# runscript +# extra_opts + + +def load_metadata(metafile, defaults=None): + if isinstance(metafile, str): + name, rootdir = _name_from_filename(metafile) + data, filename = _utils.load_pyproject_toml(metafile, + name=name or '_unknown_', + requirefiles=False, + ) + else: + text = metafile.read() + filename = metafile.name + name, rootdir = _name_from_filename(filename) + data = _utils.parse_pyproject_toml(text, rootdir, name, + requirefiles=False) + project = data.get('project') + tool = data.get('tool', {}).get('pyperformance', {}) + + defaults = _ensure_defaults(defaults, rootdir) + base, basefile = _resolve_base( + tool, + project, + filename, + project.get('version') or defaults.get('version'), + ) + top = _resolve(project or {}, tool, filename) + merged = _merge_metadata(top, base, defaults) + + if not merged.get('name'): + raise ValueError('missing benchmark name') + if not merged.get('version'): + raise ValueError('missing benchmark version') + + metafile = merged.pop('metafile') + merged['spec'] = BenchmarkSpec( + merged.pop('name'), + merged.pop('version'), + # XXX Should we leave this (origin) blank? 
+ metafile, + ) + if basefile: + merged['base'] = basefile + + return merged, filename + + +def _name_from_filename(metafile): + rootdir, basename = os.path.split(metafile) + if basename == 'pyproject.toml': + dirname = os.path.dirname(rootdir) + name = dirname[3:] if dirname.startswith('bm_') else None + elif basename.startswith('bm_') and basename.endswith('.toml'): + name = basename[3:-5] + else: + name = None + return name, rootdir + + +def _ensure_defaults(defaults, rootdir): + if not defaults: + defaults = {} + + if not defaults.get('datadir'): + datadir = os.path.join(rootdir, DATA) + if os.path.isdir(datadir): + defaults['datadir'] = datadir + + if not defaults.get('prescript'): + prescript = os.path.join(rootdir, PREP) + if os.path.isfile(prescript): + defaults['prescript'] = prescript + + if not defaults.get('runscript'): + runscript = os.path.join(rootdir, RUN) + if os.path.isfile(runscript): + defaults['runscript'] = runscript + + return defaults + + +def _resolve_base(tool, project, filename, version): + rootdir, basename = os.path.split(filename) + + metabase = tool.get('metabase') + if not metabase: + if basename == 'pyproject.toml': + return None, None + elif not (basename.startswith('bm_') and basename.endswith('.toml')): + return None, None + elif not os.path.basename(rootdir).startswith('bm_'): + return None, None + else: + metabase = os.path.join(rootdir, 'pyproject.toml') + if not os.path.isfile(metabase): + return None, None + + if project is not None: + unexpected = set(project) - {'name', 'dynamic'} + if unexpected: + raise ValueError(f'[project] should be minimal if "metabase" is provided, got extra {sorted(unexpected)}') + + if metabase == '..': + metabase = os.path.join( + os.path.dirname(rootdir), + 'base.toml', + ) + if metabase == filename: + raise Exception('circular') + + if not os.path.isabs(metabase): + metabase = os.path.join(rootdir, metabase) + if metabase == filename: + raise Exception('circular') + return load_metadata(metabase, + {'version': version, 'name': '_base_'}) + + +def _resolve(project, tool, filename): + resolved = { + 'metafile': filename, + } + + rootdir = os.path.dirname(filename) + for field, target in TOOL_FIELDS.items(): + if target is None: + target = field + if not resolved.get(target): + value = tool.get(field) + if value is not None: + resolved[target] = _resolve_value(field, value, rootdir) + + for field, target in PEP_621_FIELDS.items(): + if target is None: + target = field + if field == 'url': + repo = project.get('urls', {}).get('repository') + raise NotImplementedError + elif not resolved.get(target): + value = project.get(field) + if value is not None: + resolved[target] = value + + return resolved + + +def _resolve_value(field, value, rootdir): + if field == 'name': + _utils.check_name(value) + elif field == 'metafile': + assert False, 'unreachable' + elif field == 'tags': + if isinstance(value, str): + value = value.replace(',', ' ').split() + for tag in value: + _utils.check_name(tag) + elif field == 'datadir': + if not os.path.isabs(value): + value = os.path.join(rootdir, value) + _utils.check_dir(value) + elif field == 'prescript': + if not os.path.isabs(value): + value = os.path.join(rootdir, value) + _utils.check_file(value) + elif field == 'runscript': + if not os.path.isabs(value): + value = os.path.join(rootdir, value) + _utils.check_file(value) + elif field == 'extra_opts': + if isinstance(value, str): + raise TypeError(f'extra_opts should be a list of strings, got {value!r}') + for opt in value: + if not opt or 
not isinstance(opt, str): + raise TypeError(f'extra_opts should be a list of strings, got {value!r}') + else: + raise NotImplementedError(field) + return value + + +def _merge_metadata(*tiers): + merged = {} + for data in tiers: + if not data: + continue + for field, value in data.items(): + if merged.get(field): + # XXX Merge containers? + continue + if value: + merged[field] = value + return merged diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py new file mode 100644 index 00000000..74c7005a --- /dev/null +++ b/pyperformance/benchmark/_run.py @@ -0,0 +1,26 @@ +import pyperf + +from .. import _utils + + +def run_perf_script(python, runscript, *, + extra_opts=None, + pyperf_opts=None, + verbose=False, + ): + if not runscript: + raise ValueError('missing runscript') + if not isinstance(runscript, str): + raise TypeError(f'runscript must be a string, got {runscript!r}') + if isinstance(python, str): + python = [python] + cmd = [ + *python, '-u', runscript, + *(extra_opts or ()), + *(pyperf_opts or ()), + ] + + with _utils.temporary_file() as tmp: + cmd.extend(('--output', tmp)) + _utils.run_command(cmd, hide_stderr=not verbose) + return pyperf.BenchmarkSuite.load(tmp) diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index 60dbbde2..93345ff2 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -1,7 +1,7 @@ import os.path from .. import __version__ -from .. import _benchmarks, benchmark as _benchmark +from .. import benchmark as _benchmark from . import _manifest # aliases @@ -37,20 +37,7 @@ def resolve(bench): metafile = os.path.join(DEFAULTS_DIR, f'bm_{bench.name}', 'pyproject.toml') - #bench = bench._replace(metafile=metafile) bench.metafile = metafile return bench with open(filename) as infile: - return _manifest.parse_manifest(infile, resolve=resolve) - - -def iter_benchmarks(manifest): - # XXX Use the benchmark's "run" script. - funcs, _ = _benchmarks.get_benchmarks() - for bench in manifest.benchmarks: - bench._func = funcs[bench.name] - yield bench - - -def get_benchmarks(manifest): - return list(iter_benchmarks(manifest)) + return _manifest.parse_manifest(infile, resolve=resolve, filename=filename) diff --git a/pyperformance/benchmarks/_manifest.py b/pyperformance/benchmarks/_manifest.py index 33306d57..3bc3af78 100644 --- a/pyperformance/benchmarks/_manifest.py +++ b/pyperformance/benchmarks/_manifest.py @@ -1,4 +1,5 @@ from collections import namedtuple +import os.path from .. import benchmark as _benchmark, _utils @@ -10,17 +11,20 @@ BenchmarksManifest = namedtuple('BenchmarksManifest', 'benchmarks groups') -def parse_manifest(text, *, resolve=None): +def parse_manifest(text, *, resolve=None, filename=None): if isinstance(text, str): lines = text.splitlines() else: lines = iter(text) + if not filename: + # Try getting the filename from a file. 
+ filename = getattr(text, 'name', None) benchmarks = None groups = {} for section, seclines in _iter_sections(lines): if section == 'benchmarks': - benchmarks = _parse_benchmarks(seclines, resolve) + benchmarks = _parse_benchmarks(seclines, resolve, filename) elif benchmarks is None: raise ValueError('invalid manifest file, expected "benchmarks" section') elif section.startswith('group '): @@ -77,13 +81,15 @@ def _iter_sections(lines): raise ValueError('invalid manifest file, no sections found') -def _parse_benchmarks(lines, resolve): +def _parse_benchmarks(lines, resolve, filename): if not lines: lines = [''] lines = iter(lines) if next(lines) != BENCH_HEADER: raise ValueError('invalid manifest file, expected benchmarks header') + localdir = os.path.dirname(filename) + benchmarks = [] for line in lines: try: @@ -96,6 +102,7 @@ def _parse_benchmarks(lines, resolve): origin or None, ) if metafile: + metafile = _resolve_metafile(metafile, name, localdir) bench = _benchmark.Benchmark(spec, metafile) else: bench = spec @@ -105,6 +112,23 @@ def _parse_benchmarks(lines, resolve): return benchmarks +def _resolve_metafile(metafile, name, localdir): + if not metafile.startswith('<') and not metafile.endswith('>'): + return metafile + + directive, _, extra = metafile[1:-1].partition(':') + if directive == 'local': + if extra: + rootdir = f'bm_{extra}' + basename = f'bm_{name}.toml' + else: + rootdir = f'bm_{name}' + basename = 'pyproject.toml' + return os.path.join(localdir, rootdir, basename) + else: + raise ValueError(f'unsupported metafile directive {metafile!r}') + + def _parse_group(name, lines, benchmarks): byname = {b.name: b for b in benchmarks} if name in byname: diff --git a/pyperformance/benchmarks/_selections.py b/pyperformance/benchmarks/_selections.py index bb6c75e4..5e15685d 100644 --- a/pyperformance/benchmarks/_selections.py +++ b/pyperformance/benchmarks/_selections.py @@ -1,4 +1,3 @@ -from .. import _benchmarks from .._utils import check_name, parse_name_pattern, parse_tag_pattern from ..benchmark import parse_benchmark, Benchmark from ._manifest import expand_benchmark_groups @@ -57,12 +56,8 @@ def iter_selections(manifest, selections, *, unique=True): if not included: included = list(_match_selection(manifest, 'tag', 'default', byname)) - funcs, _ = _benchmarks.get_benchmarks() for bench in included: if bench not in excluded: - if isinstance(bench, Benchmark): - # XXX Use the benchmark's "run" script. - bench._func = funcs[bench.name] yield bench diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index c9bcb58d..f14f7c8e 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -7,8 +7,6 @@ import pyperformance from pyperformance.benchmarks import ( load_manifest, - iter_benchmarks, - get_benchmarks, iter_selections, ) from pyperformance.compare import display_benchmark_suite diff --git a/pyperformance/requirements.in b/pyperformance/requirements.in index 3fbadf60..0edc16b4 100644 --- a/pyperformance/requirements.in +++ b/pyperformance/requirements.in @@ -3,6 +3,10 @@ pyperf +# for benchmark metadata: +packaging +toml + # Benchmarks dependencies # ----------------------- @@ -11,6 +15,8 @@ pyperf # should be increased to respect semantic versionning. Comparison between # two pyperformance results of two different major versions is not reliable. +# XXX Move these to the individual benchmark dirs. 
+ Chameleon # bm_chameleon Django # bm_django_template # FIXME: reenable genshi @@ -24,6 +30,32 @@ pyaes # bm_crypto_pyaes sympy # bm_sympy tornado # bm_tornado_http +# for the pyston benchmarks: +requests +aiohttp +uvloop +greenlet +gevent +django-cms +djangocms-bootstrap4 +djangocms-installer +djangocms-file +djangocms-googlemap +djangocms-snippet +djangocms-style +djangocms-video +Flask +Flask-Login +Flask-Blogging +gunicorn +uvloop +mypy +pycparser +pylint +torch +thrift +typed-ast + # Optional dependencies # --------------------- diff --git a/pyperformance/requirements.txt b/pyperformance/requirements.txt index cd897337..0c6439a5 100644 --- a/pyperformance/requirements.txt +++ b/pyperformance/requirements.txt @@ -2,47 +2,290 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile requirements.in +# pip-compile pyperformance/requirements.in # +aiohttp==3.7.4.post0 + # via -r pyperformance/requirements.in asgiref==3.3.4 # via django +astroid==2.8.0 + # via pylint +async-timeout==3.0.1 + # via aiohttp +attrs==21.2.0 + # via aiohttp +backports.zoneinfo==0.2.1 + # via tzlocal +blinker==1.4 + # via + # flask-blogging + # flask-principal certifi==2020.12.5 - # via dulwich + # via + # dulwich + # requests chameleon==3.9.0 - # via -r requirements.in + # via -r pyperformance/requirements.in +chardet==4.0.0 + # via aiohttp +charset-normalizer==2.0.6 + # via requests +click==8.0.1 + # via flask +dj-database-url==0.5.0 + # via djangocms-installer +django-classy-tags==2.0.0 + # via + # django-cms + # django-sekizai +django-cms==3.9.0 + # via + # -r pyperformance/requirements.in + # djangocms-attributes-field + # djangocms-bootstrap4 + # djangocms-file + # djangocms-googlemap + # djangocms-icon + # djangocms-link + # djangocms-picture + # djangocms-snippet + # djangocms-style + # djangocms-text-ckeditor + # djangocms-video +django-filer==2.0.2 + # via + # djangocms-bootstrap4 + # djangocms-file + # djangocms-googlemap + # djangocms-link + # djangocms-picture + # djangocms-video +django-formtools==2.3 + # via django-cms +django-js-asset==1.2.2 + # via django-mptt +django-mptt==0.13.4 + # via django-filer +django-polymorphic==3.0.0 + # via django-filer +django-sekizai==2.0.0 + # via django-cms +django-treebeard==4.5.1 + # via django-cms django==3.2.5 - # via -r requirements.in + # via + # -r pyperformance/requirements.in + # django-classy-tags + # django-cms + # django-filer + # django-formtools + # django-polymorphic + # django-sekizai + # django-treebeard + # easy-thumbnails +djangocms-admin-style==2.0.2 + # via django-cms +djangocms-attributes-field==2.0.0 + # via + # djangocms-bootstrap4 + # djangocms-file + # djangocms-icon + # djangocms-link + # djangocms-picture + # djangocms-style + # djangocms-video +djangocms-bootstrap4==2.0.0 + # via -r pyperformance/requirements.in +djangocms-file==3.0.0 + # via -r pyperformance/requirements.in +djangocms-googlemap==2.0.0 + # via -r pyperformance/requirements.in +djangocms-icon==2.0.0 + # via djangocms-bootstrap4 +djangocms-installer==2.0.0 + # via -r pyperformance/requirements.in +djangocms-link==3.0.0 + # via djangocms-bootstrap4 +djangocms-picture==3.0.0 + # via djangocms-bootstrap4 +djangocms-snippet==3.0.0 + # via -r pyperformance/requirements.in +djangocms-style==3.0.0 + # via -r pyperformance/requirements.in +djangocms-text-ckeditor==4.0.0 + # via djangocms-bootstrap4 +djangocms-video==3.0.0 + # via -r pyperformance/requirements.in dulwich==0.20.21 - # via -r requirements.in + # via -r pyperformance/requirements.in 
+easy-thumbnails==2.7.1 + # via + # django-filer + # djangocms-picture +flask-blogging==1.2.2 + # via -r pyperformance/requirements.in +flask-caching==1.10.1 + # via flask-blogging +flask-fileupload==0.5.0 + # via flask-blogging +flask-login==0.5.0 + # via + # -r pyperformance/requirements.in + # flask-blogging + # flask-fileupload +flask-principal==0.4.0 + # via flask-blogging +flask-wtf==0.15.1 + # via + # flask-blogging + # flask-fileupload +flask==2.0.2 + # via + # -r pyperformance/requirements.in + # flask-blogging + # flask-caching + # flask-fileupload + # flask-login + # flask-principal + # flask-wtf +gevent==21.8.0 + # via -r pyperformance/requirements.in greenlet==1.1.0 - # via sqlalchemy + # via + # -r pyperformance/requirements.in + # gevent + # sqlalchemy +gunicorn==20.1.0 + # via -r pyperformance/requirements.in html5lib==1.1 - # via -r requirements.in + # via + # -r pyperformance/requirements.in + # djangocms-text-ckeditor +idna==3.2 + # via + # requests + # yarl +isort==5.9.3 + # via pylint +itsdangerous==2.0.1 + # via + # flask + # flask-wtf +jinja2==3.0.1 + # via flask +lazy-object-proxy==1.6.0 + # via astroid mako==1.1.4 - # via -r requirements.in -markupsafe==1.1.1 - # via mako + # via -r pyperformance/requirements.in +markdown==3.3.4 + # via flask-blogging +markupsafe==2.0.1 + # via + # jinja2 + # mako + # wtforms +mccabe==0.6.1 + # via pylint mpmath==1.2.1 # via sympy +multidict==5.2.0 + # via + # aiohttp + # yarl +mypy-extensions==0.4.3 + # via mypy +mypy==0.910 + # via -r pyperformance/requirements.in +packaging==21.0 + # via -r pyperformance/requirements.in +pillow==8.3.2 + # via + # djangocms-text-ckeditor + # easy-thumbnails +platformdirs==2.4.0 + # via pylint psutil==5.8.0 - # via -r requirements.in + # via -r pyperformance/requirements.in pyaes==1.6.1 - # via -r requirements.in + # via -r pyperformance/requirements.in +pycparser==2.20 + # via -r pyperformance/requirements.in +pylint==2.11.1 + # via -r pyperformance/requirements.in +pyparsing==2.4.7 + # via packaging pyperf==2.2.0 - # via -r requirements.in + # via -r pyperformance/requirements.in +python-slugify==5.0.2 + # via flask-blogging pytz==2021.1 # via django +requests==2.26.0 + # via -r pyperformance/requirements.in +shortuuid==1.0.1 + # via flask-blogging six==1.16.0 - # via html5lib + # via + # djangocms-installer + # html5lib + # thrift sqlalchemy==1.4.15 - # via -r requirements.in + # via + # -r pyperformance/requirements.in + # flask-blogging sqlparse==0.4.2 # via django sympy==1.8 - # via -r requirements.in + # via -r pyperformance/requirements.in +text-unidecode==1.3 + # via python-slugify +thrift==0.15.0 + # via -r pyperformance/requirements.in +toml==0.10.2 + # via + # -r pyperformance/requirements.in + # mypy + # pylint +torch==1.9.1 + # via -r pyperformance/requirements.in tornado==6.1 - # via -r requirements.in + # via -r pyperformance/requirements.in +typed-ast==1.4.3 + # via -r pyperformance/requirements.in +typing-extensions==3.10.0.2 + # via + # aiohttp + # astroid + # mypy + # pylint + # torch +tzlocal==3.0 + # via djangocms-installer +unidecode==1.1.2 + # via django-filer urllib3==1.26.5 - # via dulwich + # via + # dulwich + # requests +uvloop==0.16.0 + # via -r pyperformance/requirements.in webencodings==0.5.1 # via html5lib +werkzeug==2.0.1 + # via + # flask + # flask-blogging + # flask-fileupload +wrapt==1.12.1 + # via astroid +wtforms==2.3.3 + # via flask-wtf +yarl==1.6.3 + # via aiohttp +zope.event==4.5.0 + # via gevent +zope.interface==5.4.0 + # via gevent + +# The following 
packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/pyperformance/run.py b/pyperformance/run.py index 5abfb3da..184304e5 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -1,6 +1,3 @@ -import logging -import os.path -import subprocess import sys import traceback try: @@ -9,10 +6,7 @@ multiprocessing = None import pyperf - import pyperformance -from pyperformance._utils import temporary_file -from pyperformance.venv import PERFORMANCE_ROOT class BenchmarkException(Exception): @@ -21,80 +15,27 @@ class BenchmarkException(Exception): # Utility functions +def get_pyperf_opts(options): + opts = [] -def run_command(command, hide_stderr=True): - if hide_stderr: - kw = {'stderr': subprocess.PIPE} - else: - kw = {} - - logging.info("Running `%s`", - " ".join(list(map(str, command)))) - - # Explicitly flush standard streams, required if streams are buffered - # (not TTY) to write lines in the expected order - sys.stdout.flush() - sys.stderr.flush() - - proc = subprocess.Popen(command, - universal_newlines=True, - **kw) - try: - stderr = proc.communicate()[1] - except: # noqa - if proc.stderr: - proc.stderr.close() - try: - proc.kill() - except OSError: - # process already exited - pass - proc.wait() - raise - - if proc.returncode != 0: - if hide_stderr: - sys.stderr.flush() - sys.stderr.write(stderr) - sys.stderr.flush() - raise RuntimeError("Benchmark died") - - -def Relative(*path): - return os.path.join(PERFORMANCE_ROOT, '_benchmarks', *path) - - -def run_perf_script(python, options, name, extra_args=[]): - bm_path = Relative("bm_%s" % name, "run.py") - cmd = list(python) - cmd.append('-u') - cmd.append(bm_path) - cmd.extend(extra_args) - copy_perf_options(cmd, options) - - with temporary_file() as tmp: - cmd.extend(('--output', tmp)) - run_command(cmd, hide_stderr=not options.verbose) - return pyperf.BenchmarkSuite.load(tmp) - - -def copy_perf_options(cmd, options): if options.debug_single_value: - cmd.append('--debug-single-value') + opts.append('--debug-single-value') elif options.rigorous: - cmd.append('--rigorous') + opts.append('--rigorous') elif options.fast: - cmd.append('--fast') + opts.append('--fast') if options.verbose: - cmd.append('--verbose') + opts.append('--verbose') if options.affinity: - cmd.append('--affinity=%s' % options.affinity) + opts.append('--affinity=%s' % options.affinity) if options.track_memory: - cmd.append('--track-memory') + opts.append('--track-memory') if options.inherit_environ: - cmd.append('--inherit-environ=%s' % ','.join(options.inherit_environ)) + opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ)) + + return opts def run_benchmarks(should_run, cmd_prefix, options): @@ -103,6 +44,8 @@ def run_benchmarks(should_run, cmd_prefix, options): run_count = str(len(to_run)) errors = [] + pyperf_opts = get_pyperf_opts(options) + for index, bench in enumerate(to_run): name = bench.name print("[%s/%s] %s..." 
% @@ -127,7 +70,7 @@ def add_bench(dest_suite, obj): return dest_suite try: - result = bench.run(cmd_prefix, options) + result = bench.run(cmd_prefix, pyperf_opts, verbose=options.verbose) except Exception as exc: print("ERROR: Benchmark %s failed: %s" % (name, exc)) traceback.print_exc() diff --git a/setup.py b/setup.py index 4bf59bd2..edd70640 100644 --- a/setup.py +++ b/setup.py @@ -106,7 +106,7 @@ def main(): 'entry_points': { 'console_scripts': ['pyperformance=pyperformance.cli:main'] }, - 'install_requires': ["pyperf"], + 'install_requires': ["pyperf", "toml", "packaging"], } setup(**options) From 91c27f913284945f94902e3e191b4ab09196e47d Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 24 Jun 2021 10:06:50 -0600 Subject: [PATCH 015/126] Fix the requirements. --- pyperformance/requirements.in | 26 ---- pyperformance/requirements.txt | 251 +-------------------------------- 2 files changed, 7 insertions(+), 270 deletions(-) diff --git a/pyperformance/requirements.in b/pyperformance/requirements.in index 0edc16b4..f8139208 100644 --- a/pyperformance/requirements.in +++ b/pyperformance/requirements.in @@ -30,32 +30,6 @@ pyaes # bm_crypto_pyaes sympy # bm_sympy tornado # bm_tornado_http -# for the pyston benchmarks: -requests -aiohttp -uvloop -greenlet -gevent -django-cms -djangocms-bootstrap4 -djangocms-installer -djangocms-file -djangocms-googlemap -djangocms-snippet -djangocms-style -djangocms-video -Flask -Flask-Login -Flask-Blogging -gunicorn -uvloop -mypy -pycparser -pylint -torch -thrift -typed-ast - # Optional dependencies # --------------------- diff --git a/pyperformance/requirements.txt b/pyperformance/requirements.txt index 0c6439a5..1924fdcd 100644 --- a/pyperformance/requirements.txt +++ b/pyperformance/requirements.txt @@ -4,288 +4,51 @@ # # pip-compile pyperformance/requirements.in # -aiohttp==3.7.4.post0 - # via -r pyperformance/requirements.in asgiref==3.3.4 # via django -astroid==2.8.0 - # via pylint -async-timeout==3.0.1 - # via aiohttp -attrs==21.2.0 - # via aiohttp -backports.zoneinfo==0.2.1 - # via tzlocal -blinker==1.4 - # via - # flask-blogging - # flask-principal certifi==2020.12.5 - # via - # dulwich - # requests + # via dulwich chameleon==3.9.0 # via -r pyperformance/requirements.in -chardet==4.0.0 - # via aiohttp -charset-normalizer==2.0.6 - # via requests -click==8.0.1 - # via flask -dj-database-url==0.5.0 - # via djangocms-installer -django-classy-tags==2.0.0 - # via - # django-cms - # django-sekizai -django-cms==3.9.0 - # via - # -r pyperformance/requirements.in - # djangocms-attributes-field - # djangocms-bootstrap4 - # djangocms-file - # djangocms-googlemap - # djangocms-icon - # djangocms-link - # djangocms-picture - # djangocms-snippet - # djangocms-style - # djangocms-text-ckeditor - # djangocms-video -django-filer==2.0.2 - # via - # djangocms-bootstrap4 - # djangocms-file - # djangocms-googlemap - # djangocms-link - # djangocms-picture - # djangocms-video -django-formtools==2.3 - # via django-cms -django-js-asset==1.2.2 - # via django-mptt -django-mptt==0.13.4 - # via django-filer -django-polymorphic==3.0.0 - # via django-filer -django-sekizai==2.0.0 - # via django-cms -django-treebeard==4.5.1 - # via django-cms django==3.2.5 - # via - # -r pyperformance/requirements.in - # django-classy-tags - # django-cms - # django-filer - # django-formtools - # django-polymorphic - # django-sekizai - # django-treebeard - # easy-thumbnails -djangocms-admin-style==2.0.2 - # via django-cms -djangocms-attributes-field==2.0.0 - # via - # djangocms-bootstrap4 - # 
djangocms-file - # djangocms-icon - # djangocms-link - # djangocms-picture - # djangocms-style - # djangocms-video -djangocms-bootstrap4==2.0.0 - # via -r pyperformance/requirements.in -djangocms-file==3.0.0 - # via -r pyperformance/requirements.in -djangocms-googlemap==2.0.0 - # via -r pyperformance/requirements.in -djangocms-icon==2.0.0 - # via djangocms-bootstrap4 -djangocms-installer==2.0.0 - # via -r pyperformance/requirements.in -djangocms-link==3.0.0 - # via djangocms-bootstrap4 -djangocms-picture==3.0.0 - # via djangocms-bootstrap4 -djangocms-snippet==3.0.0 - # via -r pyperformance/requirements.in -djangocms-style==3.0.0 - # via -r pyperformance/requirements.in -djangocms-text-ckeditor==4.0.0 - # via djangocms-bootstrap4 -djangocms-video==3.0.0 # via -r pyperformance/requirements.in dulwich==0.20.21 # via -r pyperformance/requirements.in -easy-thumbnails==2.7.1 - # via - # django-filer - # djangocms-picture -flask-blogging==1.2.2 - # via -r pyperformance/requirements.in -flask-caching==1.10.1 - # via flask-blogging -flask-fileupload==0.5.0 - # via flask-blogging -flask-login==0.5.0 - # via - # -r pyperformance/requirements.in - # flask-blogging - # flask-fileupload -flask-principal==0.4.0 - # via flask-blogging -flask-wtf==0.15.1 - # via - # flask-blogging - # flask-fileupload -flask==2.0.2 - # via - # -r pyperformance/requirements.in - # flask-blogging - # flask-caching - # flask-fileupload - # flask-login - # flask-principal - # flask-wtf -gevent==21.8.0 - # via -r pyperformance/requirements.in greenlet==1.1.0 - # via - # -r pyperformance/requirements.in - # gevent - # sqlalchemy -gunicorn==20.1.0 - # via -r pyperformance/requirements.in + # via sqlalchemy html5lib==1.1 - # via - # -r pyperformance/requirements.in - # djangocms-text-ckeditor -idna==3.2 - # via - # requests - # yarl -isort==5.9.3 - # via pylint -itsdangerous==2.0.1 - # via - # flask - # flask-wtf -jinja2==3.0.1 - # via flask -lazy-object-proxy==1.6.0 - # via astroid + # via -r pyperformance/requirements.in mako==1.1.4 # via -r pyperformance/requirements.in -markdown==3.3.4 - # via flask-blogging markupsafe==2.0.1 - # via - # jinja2 - # mako - # wtforms -mccabe==0.6.1 - # via pylint + # via mako mpmath==1.2.1 # via sympy -multidict==5.2.0 - # via - # aiohttp - # yarl -mypy-extensions==0.4.3 - # via mypy -mypy==0.910 - # via -r pyperformance/requirements.in packaging==21.0 # via -r pyperformance/requirements.in -pillow==8.3.2 - # via - # djangocms-text-ckeditor - # easy-thumbnails -platformdirs==2.4.0 - # via pylint psutil==5.8.0 # via -r pyperformance/requirements.in pyaes==1.6.1 # via -r pyperformance/requirements.in -pycparser==2.20 - # via -r pyperformance/requirements.in -pylint==2.11.1 - # via -r pyperformance/requirements.in pyparsing==2.4.7 # via packaging pyperf==2.2.0 # via -r pyperformance/requirements.in -python-slugify==5.0.2 - # via flask-blogging pytz==2021.1 # via django -requests==2.26.0 - # via -r pyperformance/requirements.in -shortuuid==1.0.1 - # via flask-blogging six==1.16.0 - # via - # djangocms-installer - # html5lib - # thrift + # via html5lib sqlalchemy==1.4.15 - # via - # -r pyperformance/requirements.in - # flask-blogging + # via -r pyperformance/requirements.in sqlparse==0.4.2 # via django sympy==1.8 # via -r pyperformance/requirements.in -text-unidecode==1.3 - # via python-slugify -thrift==0.15.0 - # via -r pyperformance/requirements.in toml==0.10.2 - # via - # -r pyperformance/requirements.in - # mypy - # pylint -torch==1.9.1 # via -r pyperformance/requirements.in tornado==6.1 # via -r 
pyperformance/requirements.in -typed-ast==1.4.3 - # via -r pyperformance/requirements.in -typing-extensions==3.10.0.2 - # via - # aiohttp - # astroid - # mypy - # pylint - # torch -tzlocal==3.0 - # via djangocms-installer -unidecode==1.1.2 - # via django-filer urllib3==1.26.5 - # via - # dulwich - # requests -uvloop==0.16.0 - # via -r pyperformance/requirements.in + # via dulwich webencodings==0.5.1 # via html5lib -werkzeug==2.0.1 - # via - # flask - # flask-blogging - # flask-fileupload -wrapt==1.12.1 - # via astroid -wtforms==2.3.3 - # via flask-wtf -yarl==1.6.3 - # via aiohttp -zope.event==4.5.0 - # via gevent -zope.interface==5.4.0 - # via gevent - -# The following packages are considered to be unsafe in a requirements file: -# setuptools From a4f97ad8a6b07da15e2af47c159779b1ad2cc9b4 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 25 Jun 2021 13:09:25 -0600 Subject: [PATCH 016/126] Pass "name" through to parse_pyproject_toml(). --- pyperformance/_utils/_pyproject_toml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyperformance/_utils/_pyproject_toml.py b/pyperformance/_utils/_pyproject_toml.py index 7c8edbc0..57645017 100644 --- a/pyperformance/_utils/_pyproject_toml.py +++ b/pyperformance/_utils/_pyproject_toml.py @@ -77,7 +77,7 @@ def load_pyproject_toml(filename, *, name=None, tools=None, requirefiles=True): with open(filename) as infile: text = infile.read() - data = parse_pyproject_toml(text, rootdir, + data = parse_pyproject_toml(text, rootdir, name, tools=tools, requirefiles=requirefiles, ) From 6d385bf2c137883303427d21043156993ec7962a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 25 Jun 2021 13:15:18 -0600 Subject: [PATCH 017/126] Leave a note about classifiers. --- pyperformance/_utils/_pyproject_toml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyperformance/_utils/_pyproject_toml.py b/pyperformance/_utils/_pyproject_toml.py index 57645017..de5a3488 100644 --- a/pyperformance/_utils/_pyproject_toml.py +++ b/pyperformance/_utils/_pyproject_toml.py @@ -20,7 +20,7 @@ def parse_person(text): def parse_classifier(text): - # XXX + # XXX Use https://pypi.org/project/packaging-classifiers. return text From 5308b29ab833430584d92c64198e6baca6f6b6af Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 25 Jun 2021 20:07:44 -0600 Subject: [PATCH 018/126] Drop an unused file. --- pyperformance/_utils/platform.py | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 pyperformance/_utils/platform.py diff --git a/pyperformance/_utils/platform.py b/pyperformance/_utils/platform.py deleted file mode 100644 index e1b4390c..00000000 --- a/pyperformance/_utils/platform.py +++ /dev/null @@ -1,5 +0,0 @@ -import sys - - -MS_WINDOWS = (sys.platform == 'win32') - From 4c8a18f1c7bdf74d8595ad96c24a894d8bd20198 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 28 Jun 2021 11:45:14 -0600 Subject: [PATCH 019/126] Load manifest and select benchmarks before running the command. --- pyperformance/cli.py | 46 +++++++++++++++++++++++++++++-------- pyperformance/cli_run.py | 49 +++++----------------------------------- 2 files changed, 42 insertions(+), 53 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 37b32cde..dbbc6f71 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -159,13 +159,6 @@ def parse_args(): options = parser.parse_args() - # Process benchmark selections. 
- if hasattr(options, 'benchmarks'): - entries = options.benchmarks.lower() - parse_entry = (lambda o, s: _benchmarks.parse_selection(s, op=o)) - parsed = _utils.parse_selections(entries, parse_entry) - options.bm_selections = list(parsed) - if options.action == 'run' and options.debug_single_value: options.fast = True @@ -188,9 +181,42 @@ def parse_args(): return (parser, options) +def _select_benchmarks(raw, manifest): + # Get the raw list of benchmarks. + entries = raw.lower() + parse_entry = (lambda o, s: _benchmarks.parse_selection(s, op=o)) + parsed = _utils.parse_selections(entries, parse_entry) + parsed_infos = list(parsed) + + # Disallow negative groups. + for op, _, kind, parsed in parsed_infos: + if callable(parsed): + continue + name = parsed.name if kind == 'benchmark' else parsed + if name in manifest.groups and op == '-': + raise ValueError(f'negative groups not supported: -{parsed.name}') + + # Get the selections. + selected = [] + for bench in _benchmarks.iter_selections(manifest, parsed_infos): + if isinstance(bench, str): + logging.warning(f"no benchmark named {bench!r}") + continue + selected.append(bench) + return selected + + def _main(): parser, options = parse_args() + if hasattr(options, 'manifest'): + # Load and update the manifest. + manifest = _benchmarks.load_manifest(options.manifest) + if 'all' not in manifest.groups: + manifest.groups['all'] = list(manifest.benchmarks) + if hasattr(options, 'benchmarks'): + benchmarks = _select_benchmarks(options.benchmarks, manifest) + if options.action == 'venv': cmd_venv(options) sys.exit() @@ -217,14 +243,14 @@ def _main(): from pyperformance.cli_run import cmd_run, cmd_list, cmd_list_groups if options.action == 'run': - cmd_run(parser, options) + cmd_run(options, benchmarks) elif options.action == 'compare': from pyperformance.compare import cmd_compare cmd_compare(options) elif options.action == 'list': - cmd_list(options) + cmd_list(options, benchmarks) elif options.action == 'list_groups': - cmd_list_groups(options) + cmd_list_groups(manifest) else: parser.print_help() sys.exit(1) diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index f14f7c8e..631d3a8f 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -5,33 +5,11 @@ import pyperf import pyperformance -from pyperformance.benchmarks import ( - load_manifest, - iter_selections, -) from pyperformance.compare import display_benchmark_suite from pyperformance.run import run_benchmarks -def _select_benchmarks(selections, manifest): - groups = manifest.groups - for op, _, kind, parsed in selections: - if callable(parsed): - continue - name = parsed.name if kind == 'benchmark' else parsed - if name in manifest.groups and op == '-': - raise ValueError(f'negative groups not supported: -{parsed.name}') - - selected = [] - for bench in iter_selections(manifest, selections): - if isinstance(bench, str): - logging.warning(f"no benchmark named {bench!r}") - continue - selected.append(bench) - return selected - - -def cmd_run(parser, options): +def cmd_run(options, benchmarks): logging.basicConfig(level=logging.INFO) print("Python benchmark suite %s" % pyperformance.__version__) @@ -49,11 +27,8 @@ def cmd_run(parser, options): print("ERROR: \"%s\" is not an absolute path" % executable) sys.exit(1) - manifest = load_manifest(options.manifest) - should_run = _select_benchmarks(options.bm_selections, manifest) - cmd_prefix = [executable] - suite, errors = run_benchmarks(should_run, cmd_prefix, options) + suite, errors = run_benchmarks(benchmarks, 
cmd_prefix, options) if not suite: print("ERROR: No benchmark was run") @@ -73,27 +48,15 @@ def cmd_run(parser, options): sys.exit(1) -def cmd_list(options): - manifest = load_manifest(options.manifest) - for op, _, kind, parsed in options.bm_selections: - if op == '+': - name = parsed.name if kind == 'benchmark' else parsed - if name == 'all': - selected = manifest.benchmarks - break - else: - selected = _select_benchmarks(options.bm_selections, manifest) - +def cmd_list(options, benchmarks): print("%r benchmarks:" % options.benchmarks) - for bench in sorted(selected): + for bench in sorted(benchmarks): print("- %s" % bench.name) print() - print("Total: %s benchmarks" % len(selected)) + print("Total: %s benchmarks" % len(benchmarks)) -def cmd_list_groups(options): - manifest = load_manifest(options.manifest) - manifest.groups['all'] = list(manifest.benchmarks) +def cmd_list_groups(manifest): all_benchmarks = set(manifest.benchmarks) for group, specs in sorted(manifest.groups.items()): From 0f63a5407ad5c78af0fcf40f3c4a11a390d5257a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 28 Jun 2021 11:48:18 -0600 Subject: [PATCH 020/126] Fix Benchmark.__repr__(). --- pyperformance/benchmark/_benchmark.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index 3357514b..4b169e0e 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -18,7 +18,7 @@ def __init__(self, spec, metafile): self.metafile = metafile def __repr__(self): - return f'{type(self).__name__}(spec={self.spec}, run={self.run})' + return f'{type(self).__name__}(spec={self.spec}, metafile={self.metafile})' def __hash__(self): return hash(self.spec) From c7a92f40b67983754634ac5f279a1c050df04b34 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 28 Jun 2021 12:09:22 -0600 Subject: [PATCH 021/126] Fix a default arg in load_metadata(). --- pyperformance/benchmark/_metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py index 97ee4057..ee533630 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/benchmark/_metadata.py @@ -49,7 +49,7 @@ def load_metadata(metafile, defaults=None): if isinstance(metafile, str): name, rootdir = _name_from_filename(metafile) data, filename = _utils.load_pyproject_toml(metafile, - name=name or '_unknown_', + name=name or None, requirefiles=False, ) else: From c45ece7a7d448d351fabce223ad35d8375fa5b81 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 29 Jun 2021 14:46:16 -0600 Subject: [PATCH 022/126] Fix the packaging data. 
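Rather than enumerating every package and data file by hand in setup.py,
let setuptools discover the packages and pull the non-Python files from
MANIFEST.in. A minimal sketch of the resulting setup() call (illustrative
only; the real option dict in setup.py keeps all the other metadata):

    from setuptools import setup, find_packages

    setup(
        name='pyperformance',
        packages=find_packages(),      # discovers pyperformance and its subpackages
        include_package_data=True,     # ship the data files listed in MANIFEST.in
    )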
--- MANIFEST.in | 4 ++++ setup.py | 34 +++------------------------------- 2 files changed, 7 insertions(+), 31 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 3bda47f9..cc6f5f6b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -10,3 +10,7 @@ include tox.ini include doc/*.rst doc/images/*.png doc/images/*.jpg include doc/conf.py doc/Makefile doc/make.bat + +include pyperformance/_benchmarks/MANIFEST +include pyperformance/_benchmarks/base.toml +recursive-include pyperformance/_benchmarks/bm_*/* * diff --git a/setup.py b/setup.py index edd70640..a80d0e42 100644 --- a/setup.py +++ b/setup.py @@ -59,39 +59,11 @@ def main(): import io import os.path - from setuptools import setup + from setuptools import setup, find_packages with io.open('README.rst', encoding="utf8") as fp: long_description = fp.read().strip() - packages = [ - 'pyperformance', - 'pyperformance._benchmarks', - 'pyperformance._benchmarks.data', - 'pyperformance._benchmarks.data.2to3', - 'pyperformance.tests', - 'pyperformance.tests.data', - ] - - data = { - 'pyperformance': ['requirements.txt'], - 'pyperformance.tests': ['data/*.json'], - } - - # Search for all files in pyperformance/benchmarks/data/ - data_dir = os.path.join('pyperformance', '_benchmarks', 'data') - benchmarks_data = [] - for root, dirnames, filenames in os.walk(data_dir): - # Strip pyperformance/benchmarks/ prefix - root = os.path.normpath(root) - root = root.split(os.path.sep) - root = os.path.sep.join(root[2:]) - - for filename in filenames: - filename = os.path.join(root, filename) - benchmarks_data.append(filename) - data['pyperformance.benchmarks'] = benchmarks_data - options = { 'name': 'pyperformance', 'version': VERSION, @@ -101,8 +73,8 @@ def main(): 'long_description': long_description, 'url': 'https://github.com/python/benchmarks', 'classifiers': CLASSIFIERS, - 'packages': packages, - 'package_data': data, + 'packages': find_packages(), + 'include_package_data': True, 'entry_points': { 'console_scripts': ['pyperformance=pyperformance.cli:main'] }, From e940e2421628193902cb69620c82b00e323e2254 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 29 Jun 2021 14:50:54 -0600 Subject: [PATCH 023/126] Support per-benchmark venvs in VirtualEnvironment. 
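To key a virtual environment to a specific interpreter, the new
_pythoninfo helper fingerprints the target Python. A simplified sketch of
the idea (the real get_python_id() below also mixes in the implementation
version and the C API version):

    import hashlib

    def python_id(info):
        # 'info' is the dict returned by get_python_info()
        h = hashlib.sha256()
        for value in (info['executable'], info['version_str'],
                      info['implementation_name'], info['magic_number']):
            h.update(value.encode('utf-8'))
        return h.hexdigest()[:12]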
--- pyperformance/_utils/__init__.py | 4 + pyperformance/_utils/_pythoninfo.py | 97 ++++++++++++++++++++ pyperformance/benchmark/_benchmark.py | 21 +++++ pyperformance/venv.py | 124 ++++++++++++++------------ 4 files changed, 187 insertions(+), 59 deletions(-) create mode 100644 pyperformance/_utils/_pythoninfo.py diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py index 5dff3256..d3a47a67 100644 --- a/pyperformance/_utils/__init__.py +++ b/pyperformance/_utils/__init__.py @@ -23,3 +23,7 @@ parse_pyproject_toml, load_pyproject_toml, ) +from ._pythoninfo import ( + get_python_id, + get_python_info, +) diff --git a/pyperformance/_utils/_pythoninfo.py b/pyperformance/_utils/_pythoninfo.py new file mode 100644 index 00000000..a659459b --- /dev/null +++ b/pyperformance/_utils/_pythoninfo.py @@ -0,0 +1,97 @@ +import hashlib +import json +import os +import subprocess +import sys + + +try: + PLATLIBDIR = sys.platlibdir +except AttributeError: + PLATLIBDIR = 'lib' +STDLIB_DIR = os.path.dirname(os.__file__) +try: + from importlib.util import MAGIC_NUMBER +except ImportError: + import _imp + MAGIC_NUMBER = _imp.get_magic() + + +def get_python_id(python=sys.executable, *, prefix=None): + """Return a unique (str) identifier for the given Python executable.""" + if not python or isinstance(python, str): + info = get_python_info(python or sys.executable) + else: + info = python + python = info['executable'] + + data = [ + # "executable" represents the install location + # (and build, to an extent). + info['executable'], + # sys.version encodes version, git info, build_date, and build_tool. + info['version_str'], + info['implementation_name'], + '.'.join(str(v) for v in info['implementation_version']), + str(info['api_version']), + info['magic_number'], + ] + # XXX Add git info if a dev build. + + h = hashlib.sha256() + for value in data: + h.update(value.encode('utf-8')) + # XXX Also include the sorted output of "python -m pip freeze"? + py_id = h.hexdigest() + # XXX Return the whole string? + py_id = py_id[:12] + + if prefix: + if prefix is True: + major, minor = info['version_info'][:2] + py_id = f'{info["implementation_name"]}{major}.{minor}-{py_id}' + else: + py_id = prefix + py_id + + return py_id + + +def get_python_info(python=sys.executable): + if not python or python == sys.executable: + return _get_raw_info() + + try: + text = subprocess.check_output( + [python, __file__], + universal_newlines=True, + ) + except subprocess.CalledProcessError: + raise Exception(f'could not get info for {python}') + return json.loads(text) + + +def _get_raw_info(): + return { + 'executable': sys.executable, + 'version_str': sys.version, + 'version_info': tuple(sys.version_info), + 'hexversion': sys.hexversion, + 'api_version': sys.api_version, + 'magic_number': MAGIC_NUMBER.hex(), + 'implementation_name': sys.implementation.name.lower(), + 'implementation_version': tuple(sys.implementation.version), + 'platform': sys.platform, + 'prefix': sys.prefix, + 'exec_prefix': sys.exec_prefix, + 'base_prefix': sys.base_prefix, + 'base_exec_prefix': sys.base_exec_prefix, + 'platlibdir': PLATLIBDIR, + 'stdlib_dir': STDLIB_DIR, + # XXX Also include the build options (e.g. configure flags)? 
+ } + + +if __name__ == '__main__': + info = _get_raw_info() + json.dump(info, sys.stdout, indent=4) + print() diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index 4b169e0e..bfec410e 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -52,6 +52,14 @@ def version(self): def origin(self): return self.spec.origin + def _get_rootdir(self): + try: + return self._rootdir + except AttributeError: + script = self.runscript + self._rootdir = os.path.dirname(script) if script else None + return self._rootdir + def _init_metadata(self): if self._metadata is not None: raise NotImplementedError @@ -78,6 +86,19 @@ def tags(self): def datadir(self): return self._get_metadata_value('datadir', None) + @property + def requirements_lockfile(self): + try: + return self._lockfile + except AttributeError: + lockfile = self._get_metadata_value('requirements_lockfile', None) + if not lockfile: + rootdir = self._get_rootdir() + if rootdir: + lockfile = os.path.join(rootdir, 'requirements.txt') + self._lockfile = lockfile + return self._lockfile + @property def prescript(self): return self._get_metadata_value('prescript', None) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index de6ad242..2c9b8488 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -1,4 +1,5 @@ import errno +import hashlib import os import shutil import subprocess @@ -8,6 +9,7 @@ from shlex import quote as shell_quote import pyperformance +from . import _utils GET_PIP_URL = 'https://bootstrap.pypa.io/get-pip.py' @@ -15,6 +17,7 @@ REQ_OLD_SETUPTOOLS = 'setuptools==18.5' PERFORMANCE_ROOT = os.path.realpath(os.path.dirname(__file__)) +REQUIREMENTS_FILE = os.path.join(PERFORMANCE_ROOT, 'requirements.txt') def is_build_dir(): @@ -24,6 +27,17 @@ def is_build_dir(): return os.path.exists(os.path.join(root_dir, 'setup.py')) +def iter_clean_lines(filename): + with open(filename) as reqsfile: + for line in reqsfile: + # strip comment + line = line.partition('#')[0] + line = line.rstrip() + if not line: + continue + yield line + + class Requirements(object): def __init__(self, filename, optional): # if pip or setuptools is updated: @@ -50,25 +64,18 @@ def __init__(self, filename, optional): # optional requirements self.optional = [] - with open(filename) as fp: - for line in fp.readlines(): - # strip comment - line = line.partition('#')[0] - line = line.rstrip() - if not line: - continue - - # strip env markers - req = line.partition(';')[0] + for line in iter_clean_lines(filename): + # strip env markers + req = line.partition(';')[0] - # strip version - req = req.partition('==')[0] - req = req.partition('>=')[0] + # strip version + req = req.partition('==')[0] + req = req.partition('>=')[0] - if req in optional: - self.optional.append(line) - else: - self.req.append(line) + if req in optional: + self.optional.append(line) + else: + self.req.append(line) def safe_rmtree(path): @@ -135,9 +142,43 @@ def download(url, filename): fp.flush() +def get_compatibility_id(bench=None): + # XXX Do not include the pyperformance reqs if a benchmark was provided? + reqs = sorted(iter_clean_lines(REQUIREMENTS_FILE)) + if bench: + lockfile = bench.requirements_lockfile + if lockfile and os.path.exists(lockfile): + reqs += sorted(iter_clean_lines(lockfile)) + + data = [ + # XXX Favor pyperf.__version__ instead? 
+ pyperformance.__version__, + '\n'.join(reqs), + ] + + h = hashlib.sha256() + for value in data: + h.update(value.encode('utf-8')) + compat_id = h.hexdigest() + # XXX Return the whole string? + compat_id = compat_id[:12] + + return compat_id + + +def get_run_name(python, bench=None): + py_id = _utils.get_python_id(python, prefix=True) + compat_id = get_compatibility_id(bench) + name = f'{py_id}-compat-{compat_id}' + if bench: + name = f'{name}-{bench.name}' + return name + + class VirtualEnvironment(object): - def __init__(self, options): + def __init__(self, options, bench=None): self.options = options + self.bench = bench self.python = options.python self._venv_path = options.venv self._pip_program = None @@ -202,47 +243,12 @@ def get_output_nocheck(self, *cmd): return (exitcode, stdout) def get_path(self): - if self._venv_path is not None: - return self._venv_path - - script = textwrap.dedent(""" - import hashlib - import sys - - performance_version = sys.argv[1] - requirements = sys.argv[2] - - data = performance_version + sys.executable + sys.version - - pyver = sys.version_info - - implementation = sys.implementation.name.lower() - - if not isinstance(data, bytes): - data = data.encode('utf-8') - with open(requirements, 'rb') as fp: - data += fp.read() - sha1 = hashlib.sha1(data).hexdigest() - - name = ('%s%s.%s-%s' - % (implementation, pyver.major, pyver.minor, sha1[:12])) - print(name) - """) - - requirements = os.path.join(PERFORMANCE_ROOT, 'requirements.txt') - cmd = (self.python, '-c', script, - pyperformance.__version__, requirements) - proc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - universal_newlines=True) - stdout = proc.communicate()[0] - if proc.returncode: - print("ERROR: failed to create the name of the virtual environment") - sys.exit(1) - - venv_name = stdout.rstrip() - self._venv_path = venv_path = os.path.join('venv', venv_name) - return venv_path + if not self._venv_path: + venv_name = get_run_name(self.python, self.bench) + self._venv_path = os.path.abspath( + os.path.join('venv', venv_name), + ) + return self._venv_path def _get_pip_program(self): venv_path = self.get_path() From 0420e96f2991ec6a8a901c36ea7761148fa97b5a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 30 Jun 2021 08:50:21 -0600 Subject: [PATCH 024/126] Ignore pyproject.toml name only if provided. --- pyperformance/_utils/_pyproject_toml.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyperformance/_utils/_pyproject_toml.py b/pyperformance/_utils/_pyproject_toml.py index de5a3488..bb3a654c 100644 --- a/pyperformance/_utils/_pyproject_toml.py +++ b/pyperformance/_utils/_pyproject_toml.py @@ -124,7 +124,8 @@ def _normalize_project(data, rootdir, name, requirefiles, **_ignored): raise ValueError(f'invalid name {name!r}') name = packaging.utils.canonicalize_name(name) data['name'] = name - unused.remove('name') + if 'name' in unused: + unused.remove('name') else: if 'name' not in data.get('dynamic', []): raise ValueError('missing required "name" field') From 6bce38648d9b10328cda15971c853a14e7c0c3d0 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 30 Jun 2021 08:51:22 -0600 Subject: [PATCH 025/126] Add requirements lock files to the benchmarks. 
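Each benchmark directory now carries its own pinned requirements.txt next
to its pyproject.toml, so a per-benchmark venv can be built from a lock
file instead of the shared top-level requirements. A rough sketch of how
such a lock file is located (lockfile_for() is illustrative only; the real
lookup is the requirements_lockfile property on Benchmark):

    import os.path

    def lockfile_for(runscript):
        # the lock file lives next to the benchmark's run script
        rootdir = os.path.dirname(runscript)
        candidate = os.path.join(rootdir, 'requirements.txt')
        return candidate if os.path.exists(candidate) else None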
--- pyperformance/_benchmarks/bm_chameleon/pyproject.toml | 5 ++++- pyperformance/_benchmarks/bm_chameleon/requirements.txt | 1 + pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml | 5 ++++- pyperformance/_benchmarks/bm_crypto_pyaes/requirements.txt | 1 + pyperformance/_benchmarks/bm_django_template/pyproject.toml | 5 ++++- .../_benchmarks/bm_django_template/requirements.txt | 4 ++++ pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml | 6 +++++- pyperformance/_benchmarks/bm_dulwich_log/requirements.txt | 3 +++ pyperformance/_benchmarks/bm_genshi/pyproject.toml | 5 ++++- pyperformance/_benchmarks/bm_genshi/requirements.txt | 2 ++ pyperformance/_benchmarks/bm_hg_startup/pyproject.toml | 5 ++++- pyperformance/_benchmarks/bm_hg_startup/requirements.txt | 1 + pyperformance/_benchmarks/bm_html5lib/pyproject.toml | 5 ++++- pyperformance/_benchmarks/bm_html5lib/requirements.txt | 3 +++ pyperformance/_benchmarks/bm_mako/pyproject.toml | 5 ++++- pyperformance/_benchmarks/bm_mako/requirements.txt | 2 ++ .../_benchmarks/bm_sqlalchemy_declarative/pyproject.toml | 5 ++++- .../_benchmarks/bm_sqlalchemy_declarative/requirements.txt | 2 ++ .../_benchmarks/bm_sqlalchemy_imperative/pyproject.toml | 5 ++++- .../_benchmarks/bm_sqlalchemy_imperative/requirements.txt | 2 ++ pyperformance/_benchmarks/bm_sympy/pyproject.toml | 5 ++++- pyperformance/_benchmarks/bm_sympy/requirements.txt | 2 ++ pyperformance/_benchmarks/bm_tornado_http/pyproject.toml | 5 ++++- pyperformance/_benchmarks/bm_tornado_http/requirements.txt | 1 + 24 files changed, 73 insertions(+), 12 deletions(-) create mode 100644 pyperformance/_benchmarks/bm_chameleon/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_crypto_pyaes/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_django_template/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_dulwich_log/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_genshi/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_hg_startup/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_html5lib/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_mako/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_sqlalchemy_declarative/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_sqlalchemy_imperative/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_sympy/requirements.txt create mode 100644 pyperformance/_benchmarks/bm_tornado_http/requirements.txt diff --git a/pyperformance/_benchmarks/bm_chameleon/pyproject.toml b/pyperformance/_benchmarks/bm_chameleon/pyproject.toml index bf447649..ba661812 100644 --- a/pyperformance/_benchmarks/bm_chameleon/pyproject.toml +++ b/pyperformance/_benchmarks/bm_chameleon/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_chameleon" +dependencies = [ + "Chameleon", +] # XXX This should be inherited from metabase. 
dynamic = [ diff --git a/pyperformance/_benchmarks/bm_chameleon/requirements.txt b/pyperformance/_benchmarks/bm_chameleon/requirements.txt new file mode 100644 index 00000000..260c3bc4 --- /dev/null +++ b/pyperformance/_benchmarks/bm_chameleon/requirements.txt @@ -0,0 +1 @@ +chameleon==3.9.1 diff --git a/pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml b/pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml index bf447649..516f1bd8 100644 --- a/pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml +++ b/pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_crypto_pyaes" +dependencies = [ + "pyaes", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_crypto_pyaes/requirements.txt b/pyperformance/_benchmarks/bm_crypto_pyaes/requirements.txt new file mode 100644 index 00000000..68abeee5 --- /dev/null +++ b/pyperformance/_benchmarks/bm_crypto_pyaes/requirements.txt @@ -0,0 +1 @@ +pyaes==1.6.1 diff --git a/pyperformance/_benchmarks/bm_django_template/pyproject.toml b/pyperformance/_benchmarks/bm_django_template/pyproject.toml index bf447649..8035647b 100644 --- a/pyperformance/_benchmarks/bm_django_template/pyproject.toml +++ b/pyperformance/_benchmarks/bm_django_template/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_django_template" +dependencies = [ + "django", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_django_template/requirements.txt b/pyperformance/_benchmarks/bm_django_template/requirements.txt new file mode 100644 index 00000000..4a3490bf --- /dev/null +++ b/pyperformance/_benchmarks/bm_django_template/requirements.txt @@ -0,0 +1,4 @@ +asgiref==3.3.4 +django==3.2.4 +pytz==2021.1 +sqlparse==0.4.1 diff --git a/pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml b/pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml index bf447649..b8524790 100644 --- a/pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml +++ b/pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml @@ -1,5 +1,9 @@ [project] -#name = "bm_" +name = "bm_dulwich_log" +dependencies = [ + # optional? + "dulwich", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_dulwich_log/requirements.txt b/pyperformance/_benchmarks/bm_dulwich_log/requirements.txt new file mode 100644 index 00000000..0adbfb46 --- /dev/null +++ b/pyperformance/_benchmarks/bm_dulwich_log/requirements.txt @@ -0,0 +1,3 @@ +certifi==2021.5.30 +dulwich==0.20.23 +urllib3==1.26.5 diff --git a/pyperformance/_benchmarks/bm_genshi/pyproject.toml b/pyperformance/_benchmarks/bm_genshi/pyproject.toml index bf447649..fd86d6a7 100644 --- a/pyperformance/_benchmarks/bm_genshi/pyproject.toml +++ b/pyperformance/_benchmarks/bm_genshi/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_genshi" +dependencies = [ + "Genshi", +] # XXX This should be inherited from metabase. 
dynamic = [ diff --git a/pyperformance/_benchmarks/bm_genshi/requirements.txt b/pyperformance/_benchmarks/bm_genshi/requirements.txt new file mode 100644 index 00000000..c2444cb6 --- /dev/null +++ b/pyperformance/_benchmarks/bm_genshi/requirements.txt @@ -0,0 +1,2 @@ +genshi==0.7.5 +six==1.16.0 diff --git a/pyperformance/_benchmarks/bm_hg_startup/pyproject.toml b/pyperformance/_benchmarks/bm_hg_startup/pyproject.toml index bf447649..316f9f67 100644 --- a/pyperformance/_benchmarks/bm_hg_startup/pyproject.toml +++ b/pyperformance/_benchmarks/bm_hg_startup/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_hg_startup" +dependencies = [ + "mercurial", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_hg_startup/requirements.txt b/pyperformance/_benchmarks/bm_hg_startup/requirements.txt new file mode 100644 index 00000000..7b54aaa7 --- /dev/null +++ b/pyperformance/_benchmarks/bm_hg_startup/requirements.txt @@ -0,0 +1 @@ +mercurial==5.8 diff --git a/pyperformance/_benchmarks/bm_html5lib/pyproject.toml b/pyperformance/_benchmarks/bm_html5lib/pyproject.toml index bf447649..0c481f6d 100644 --- a/pyperformance/_benchmarks/bm_html5lib/pyproject.toml +++ b/pyperformance/_benchmarks/bm_html5lib/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_html5lib" +dependencies = [ + "html5lib", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_html5lib/requirements.txt b/pyperformance/_benchmarks/bm_html5lib/requirements.txt new file mode 100644 index 00000000..937d99a9 --- /dev/null +++ b/pyperformance/_benchmarks/bm_html5lib/requirements.txt @@ -0,0 +1,3 @@ +html5lib==1.1 +six==1.16.0 +webencodings==0.5.1 diff --git a/pyperformance/_benchmarks/bm_mako/pyproject.toml b/pyperformance/_benchmarks/bm_mako/pyproject.toml index bf447649..1c70c705 100644 --- a/pyperformance/_benchmarks/bm_mako/pyproject.toml +++ b/pyperformance/_benchmarks/bm_mako/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_mako" +dependencies = [ + "Mako", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_mako/requirements.txt b/pyperformance/_benchmarks/bm_mako/requirements.txt new file mode 100644 index 00000000..5f2fe892 --- /dev/null +++ b/pyperformance/_benchmarks/bm_mako/requirements.txt @@ -0,0 +1,2 @@ +mako==1.1.4 +markupsafe==2.0.1 diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml b/pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml index bf447649..9597ae5a 100644 --- a/pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml +++ b/pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_sqlalchemy_declarative" +dependencies = [ + "SQLAlchemy", +] # XXX This should be inherited from metabase. 
dynamic = [ diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_declarative/requirements.txt b/pyperformance/_benchmarks/bm_sqlalchemy_declarative/requirements.txt new file mode 100644 index 00000000..b1f2cb01 --- /dev/null +++ b/pyperformance/_benchmarks/bm_sqlalchemy_declarative/requirements.txt @@ -0,0 +1,2 @@ +greenlet==1.1.0 +sqlalchemy==1.4.19 diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml b/pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml index bf447649..eb5b89ef 100644 --- a/pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml +++ b/pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_sqlalchemy_imperative" +dependencies = [ + "SQLAlchemy", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_imperative/requirements.txt b/pyperformance/_benchmarks/bm_sqlalchemy_imperative/requirements.txt new file mode 100644 index 00000000..b1f2cb01 --- /dev/null +++ b/pyperformance/_benchmarks/bm_sqlalchemy_imperative/requirements.txt @@ -0,0 +1,2 @@ +greenlet==1.1.0 +sqlalchemy==1.4.19 diff --git a/pyperformance/_benchmarks/bm_sympy/pyproject.toml b/pyperformance/_benchmarks/bm_sympy/pyproject.toml index bf447649..64683b2b 100644 --- a/pyperformance/_benchmarks/bm_sympy/pyproject.toml +++ b/pyperformance/_benchmarks/bm_sympy/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_sympy" +dependencies = [ + "sympy", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_sympy/requirements.txt b/pyperformance/_benchmarks/bm_sympy/requirements.txt new file mode 100644 index 00000000..652d404d --- /dev/null +++ b/pyperformance/_benchmarks/bm_sympy/requirements.txt @@ -0,0 +1,2 @@ +mpmath==1.2.1 +sympy==1.8 diff --git a/pyperformance/_benchmarks/bm_tornado_http/pyproject.toml b/pyperformance/_benchmarks/bm_tornado_http/pyproject.toml index bf447649..1bb2b709 100644 --- a/pyperformance/_benchmarks/bm_tornado_http/pyproject.toml +++ b/pyperformance/_benchmarks/bm_tornado_http/pyproject.toml @@ -1,5 +1,8 @@ [project] -#name = "bm_" +name = "bm_tornado_http" +dependencies = [ + "tornado", +] # XXX This should be inherited from metabase. dynamic = [ diff --git a/pyperformance/_benchmarks/bm_tornado_http/requirements.txt b/pyperformance/_benchmarks/bm_tornado_http/requirements.txt new file mode 100644 index 00000000..ca2eb1c6 --- /dev/null +++ b/pyperformance/_benchmarks/bm_tornado_http/requirements.txt @@ -0,0 +1 @@ +tornado==6.1 From 29cf2286d79322097d90d6f86723867fe90af763 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 30 Jun 2021 08:52:28 -0600 Subject: [PATCH 026/126] Make a venv for each benchmark instead of sharing one. 
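run_benchmarks() now creates one virtual environment per benchmark instead
of a single shared one. Each venv is named after the interpreter id, the
compatibility id of the pinned requirements, and the benchmark itself, so
repeated runs against the same interpreter and lock files can reuse it. A
sketch of the naming scheme (the hash values shown are made up):

    def run_name(py_id, compat_id, bench=None):
        # same scheme as get_run_name() in pyperformance/venv.py
        name = f'{py_id}-compat-{compat_id}'
        if bench is not None:
            name = f'{name}-bm-{bench.name}'
        return name

    # e.g. venv/cpython3.9-1234abcd5678-compat-90efcdab1234-bm-mako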
--- pyperformance/_utils/__init__.py | 1 + pyperformance/_utils/_pythoninfo.py | 43 +++++++++++++++ pyperformance/benchmark/_benchmark.py | 5 +- pyperformance/benchmark/_metadata.py | 2 +- pyperformance/benchmark/_run.py | 9 ++-- pyperformance/cli_run.py | 3 +- pyperformance/requirements.in | 29 ++--------- pyperformance/requirements.txt | 38 -------------- pyperformance/run.py | 19 +++++-- pyperformance/venv.py | 75 +++++++++++++++++---------- 10 files changed, 125 insertions(+), 99 deletions(-) diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py index d3a47a67..db4c08fd 100644 --- a/pyperformance/_utils/__init__.py +++ b/pyperformance/_utils/__init__.py @@ -26,4 +26,5 @@ from ._pythoninfo import ( get_python_id, get_python_info, + inspect_python_install, ) diff --git a/pyperformance/_utils/_pythoninfo.py b/pyperformance/_utils/_pythoninfo.py index a659459b..1b8b98b9 100644 --- a/pyperformance/_utils/_pythoninfo.py +++ b/pyperformance/_utils/_pythoninfo.py @@ -70,6 +70,49 @@ def get_python_info(python=sys.executable): return json.loads(text) +def inspect_python_install(python=sys.executable): + if isinstance(python, str): + info = get_python_info(python) + else: + info = python + return _inspect_python_install(**info) + + +def _inspect_python_install(executable, prefix, base_prefix, platlibdir, + stdlib_dir, version_info, **_ignored): + is_venv = prefix != base_prefix + + if os.path.basename(stdlib_dir) == 'Lib': + base_executable = os.path.join(os.path.dirname(stdlib_dir), 'python') + if not os.path.exists(base_executable): + raise NotImplementedError(base_executable) + is_dev = True + else: + major, minor = version_info[:2] + python = f'python{major}.{minor}' + if is_venv: + if '.' in os.path.basename(executable): + ext = executable.rpartition('.')[2] + python_exe = f'{python}.{ext}' + else: + python_exe = python + expected = os.path.join(base_prefix, platlibdir, python) + if stdlib_dir == expected: + bindir = os.path.basename(os.path.dirname(executable)) + base_executable = os.path.join(base_prefix, bindir, python_exe) + else: + raise NotImplementedError(stdlib_dir) + else: + expected = os.path.join(prefix, platlibdir, python) + if stdlib_dir == expected: + base_executable = executable + else: + raise NotImplementedError(stdlib_dir) + is_dev = False + + return base_executable, is_dev, is_venv + + def _get_raw_info(): return { 'executable': sys.executable, diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index bfec410e..8e128be8 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -1,3 +1,5 @@ +import os.path + from ._spec import BenchmarkSpec from ._metadata import load_metadata from ._run import run_perf_script @@ -117,10 +119,11 @@ def extra_opts(self): # * dependencies # * requirements - def run(self, python, pyperf_opts=None, *, verbose=False): + def run(self, python, pyperf_opts=None, *, venv=None, verbose=False): return run_perf_script( python, self.runscript, + venv=venv, extra_opts=self.extra_opts, pyperf_opts=pyperf_opts, verbose=verbose, diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py index ee533630..b0927fef 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/benchmark/_metadata.py @@ -140,7 +140,7 @@ def _resolve_base(tool, project, filename, version): return None, None if project is not None: - unexpected = set(project) - {'name', 'dynamic'} + unexpected = set(project) - {'name', 'dynamic', 
'dependencies'} if unexpected: raise ValueError(f'[project] should be minimal if "metabase" is provided, got extra {sorted(unexpected)}') diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py index 74c7005a..9ff23d39 100644 --- a/pyperformance/benchmark/_run.py +++ b/pyperformance/benchmark/_run.py @@ -1,9 +1,12 @@ +import sys + import pyperf from .. import _utils def run_perf_script(python, runscript, *, + venv=None, extra_opts=None, pyperf_opts=None, verbose=False, @@ -12,10 +15,10 @@ def run_perf_script(python, runscript, *, raise ValueError('missing runscript') if not isinstance(runscript, str): raise TypeError(f'runscript must be a string, got {runscript!r}') - if isinstance(python, str): - python = [python] + if venv and python == sys.executable: + python = venv.get_python_program() cmd = [ - *python, '-u', runscript, + python, '-u', runscript, *(extra_opts or ()), *(pyperf_opts or ()), ] diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index 631d3a8f..0b4eda11 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -27,8 +27,7 @@ def cmd_run(options, benchmarks): print("ERROR: \"%s\" is not an absolute path" % executable) sys.exit(1) - cmd_prefix = [executable] - suite, errors = run_benchmarks(benchmarks, cmd_prefix, options) + suite, errors = run_benchmarks(benchmarks, executable, options) if not suite: print("ERROR: No benchmark was run") diff --git a/pyperformance/requirements.in b/pyperformance/requirements.in index f8139208..6e95edc7 100644 --- a/pyperformance/requirements.in +++ b/pyperformance/requirements.in @@ -1,3 +1,7 @@ +# When one of these dependencies is upgraded, the pyperformance major version +# should be increased to respect semantic versionning. Comparison between +# two pyperformance results of two different major versions is not reliable. + # pyperformance dependencies # -------------------------- @@ -8,33 +12,10 @@ packaging toml -# Benchmarks dependencies -# ----------------------- -# -# When one of these dependencies is upgraded, the pyperformance major version -# should be increased to respect semantic versionning. Comparison between -# two pyperformance results of two different major versions is not reliable. - -# XXX Move these to the individual benchmark dirs. - -Chameleon # bm_chameleon -Django # bm_django_template -# FIXME: reenable genshi -# Genshi # bm_genshi -Mako # bm_mako -SQLAlchemy # bm_sqlalchemy_declarative -# FIXME: reenable hg_startup -# mercurial # bm_hg_startup -html5lib # bm_html5lib -pyaes # bm_crypto_pyaes -sympy # bm_sympy -tornado # bm_tornado_http - - # Optional dependencies # --------------------- # # The list of optional dependencies is hardcoded in pyperformance/venv.py +# XXX Do we still need this? 
psutil -dulwich # bm_dulwich_log diff --git a/pyperformance/requirements.txt b/pyperformance/requirements.txt index 1924fdcd..cea4f9f7 100644 --- a/pyperformance/requirements.txt +++ b/pyperformance/requirements.txt @@ -4,51 +4,13 @@ # # pip-compile pyperformance/requirements.in # -asgiref==3.3.4 - # via django -certifi==2020.12.5 - # via dulwich -chameleon==3.9.0 - # via -r pyperformance/requirements.in -django==3.2.5 - # via -r pyperformance/requirements.in -dulwich==0.20.21 - # via -r pyperformance/requirements.in -greenlet==1.1.0 - # via sqlalchemy -html5lib==1.1 - # via -r pyperformance/requirements.in -mako==1.1.4 - # via -r pyperformance/requirements.in -markupsafe==2.0.1 - # via mako -mpmath==1.2.1 - # via sympy packaging==21.0 # via -r pyperformance/requirements.in psutil==5.8.0 # via -r pyperformance/requirements.in -pyaes==1.6.1 - # via -r pyperformance/requirements.in pyparsing==2.4.7 # via packaging pyperf==2.2.0 # via -r pyperformance/requirements.in -pytz==2021.1 - # via django -six==1.16.0 - # via html5lib -sqlalchemy==1.4.15 - # via -r pyperformance/requirements.in -sqlparse==0.4.2 - # via django -sympy==1.8 - # via -r pyperformance/requirements.in toml==0.10.2 # via -r pyperformance/requirements.in -tornado==6.1 - # via -r pyperformance/requirements.in -urllib3==1.26.5 - # via dulwich -webencodings==0.5.1 - # via html5lib diff --git a/pyperformance/run.py b/pyperformance/run.py index 184304e5..dca05b0c 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -7,6 +7,7 @@ import pyperf import pyperformance +from . import venv as _venv class BenchmarkException(Exception): @@ -38,9 +39,16 @@ def get_pyperf_opts(options): return opts -def run_benchmarks(should_run, cmd_prefix, options): - suite = None +def run_benchmarks(should_run, python, options): to_run = sorted(should_run) + + venvs = {} + for bench in to_run: + venv = _venv.VirtualEnvironment(options, bench, usebase=True) + venv.create() + venvs[bench] = venv + + suite = None run_count = str(len(to_run)) errors = [] @@ -70,7 +78,12 @@ def add_bench(dest_suite, obj): return dest_suite try: - result = bench.run(cmd_prefix, pyperf_opts, verbose=options.verbose) + result = bench.run( + python, + pyperf_opts, + venv=venvs.get(bench), + verbose=options.verbose, + ) except Exception as exc: print("ERROR: Benchmark %s failed: %s" % (name, exc)) traceback.print_exc() diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 2c9b8488..ecee3f05 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -64,18 +64,25 @@ def __init__(self, filename, optional): # optional requirements self.optional = [] - for line in iter_clean_lines(filename): - # strip env markers - req = line.partition(';')[0] + if os.path.exists(filename): + for line in iter_clean_lines(filename): + # strip env markers + req = line.partition(';')[0] - # strip version - req = req.partition('==')[0] - req = req.partition('>=')[0] + # strip version + req = req.partition('==')[0] + req = req.partition('>=')[0] - if req in optional: - self.optional.append(line) - else: - self.req.append(line) + if req in optional: + self.optional.append(line) + else: + self.req.append(line) + + def get(self, name): + for req in self.req: + if req.startswith(name): + return req + return None def safe_rmtree(path): @@ -171,15 +178,20 @@ def get_run_name(python, bench=None): compat_id = get_compatibility_id(bench) name = f'{py_id}-compat-{compat_id}' if bench: - name = f'{name}-{bench.name}' + name = f'{name}-bm-{bench.name}' return name class 
VirtualEnvironment(object): - def __init__(self, options, bench=None): + + def __init__(self, options, bench=None, *, usebase=False): + python = options.python + if usebase: + python, _, _ = _utils.inspect_python_install(python) + self.options = options self.bench = bench - self.python = options.python + self.python = python self._venv_path = options.venv self._pip_program = None self._force_old_pip = False @@ -393,22 +405,30 @@ def _install_req(self): pip_program = self.get_pip_program() # parse requirements - filename = os.path.join(PERFORMANCE_ROOT, 'requirements.txt') - requirements = Requirements(filename, - # FIXME: don't hardcode requirements - ['psutil', 'dulwich']) + basereqs = Requirements(REQUIREMENTS_FILE, ['psutil']) + if self.bench: + reqsfile = self.bench.requirements_lockfile + requirements = Requirements(reqsfile, []) + # Every benchmark must depend on pyperf. + if not requirements.get('pyperf'): + pyperf_req = basereqs.get('pyperf') + if not pyperf_req: + raise NotImplementedError + requirements.req.append(pyperf_req) + else: + requirements = basereqs # Upgrade pip cmd = pip_program + ['install', '-U'] if self._force_old_pip: cmd.extend((REQ_OLD_PIP, REQ_OLD_SETUPTOOLS)) else: - cmd.extend(requirements.pip) + cmd.extend(basereqs.pip) self.run_cmd(cmd) # Upgrade installer dependencies (setuptools, ...) cmd = pip_program + ['install', '-U'] - cmd.extend(requirements.installer) + cmd.extend(basereqs.installer) self.run_cmd(cmd) # install requirements @@ -424,14 +444,15 @@ def _install_req(self): print("WARNING: failed to install %s" % req) print() - # install pyperformance inside the virtual environment - if is_build_dir(): - root_dir = os.path.dirname(PERFORMANCE_ROOT) - cmd = pip_program + ['install', '-e', root_dir] - else: - version = pyperformance.__version__ - cmd = pip_program + ['install', 'pyperformance==%s' % version] - self.run_cmd(cmd) + if not self.bench: + # install pyperformance inside the virtual environment + if is_build_dir(): + root_dir = os.path.dirname(PERFORMANCE_ROOT) + cmd = pip_program + ['install', '-e', root_dir] + else: + version = pyperformance.__version__ + cmd = pip_program + ['install', 'pyperformance==%s' % version] + self.run_cmd(cmd) # Display the pip version cmd = pip_program + ['--version'] From 6cadadcf599584ccad9493056ab3c68b6cbe2f19 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 19 Jul 2021 19:55:18 -0600 Subject: [PATCH 027/126] Support "libsdir" in metadata. 
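A benchmark's metadata can now name a "libsdir" of vendored libraries;
run_perf_script() prepends it to PYTHONPATH in the environment of the
child process that runs the benchmark script. The core of the mechanism,
as an illustrative helper (run_with_libsdir() is not part of the change):

    import os
    import subprocess

    def run_with_libsdir(cmd, libsdir):
        env = dict(os.environ)
        parts = [libsdir] + [p for p in env.get('PYTHONPATH', '').split(os.pathsep) if p]
        env['PYTHONPATH'] = os.pathsep.join(parts)
        return subprocess.run(cmd, env=env, check=True)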
--- pyperformance/_utils/_platform.py | 3 ++- pyperformance/benchmark/_benchmark.py | 5 +++++ pyperformance/benchmark/_metadata.py | 11 ++++++++--- pyperformance/benchmark/_run.py | 9 ++++++++- 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/pyperformance/_utils/_platform.py b/pyperformance/_utils/_platform.py index 2c803012..25eb0242 100644 --- a/pyperformance/_utils/_platform.py +++ b/pyperformance/_utils/_platform.py @@ -6,7 +6,7 @@ MS_WINDOWS = (sys.platform == 'win32') -def run_command(command, hide_stderr=True): +def run_command(command, env=None, *, hide_stderr=True): if hide_stderr: kw = {'stderr': subprocess.PIPE} else: @@ -22,6 +22,7 @@ def run_command(command, hide_stderr=True): proc = subprocess.Popen(command, universal_newlines=True, + env=env, **kw) try: stderr = proc.communicate()[1] diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index 8e128be8..a8eaa21f 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -88,6 +88,10 @@ def tags(self): def datadir(self): return self._get_metadata_value('datadir', None) + @property + def libsdir(self): + return self._get_metadata_value('libsdir', None) + @property def requirements_lockfile(self): try: @@ -126,5 +130,6 @@ def run(self, python, pyperf_opts=None, *, venv=None, verbose=False): venv=venv, extra_opts=self.extra_opts, pyperf_opts=pyperf_opts, + libsdir=self.libsdir, verbose=verbose, ) diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py index b0927fef..7e76875b 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/benchmark/_metadata.py @@ -28,6 +28,7 @@ 'prescript': None, 'runscript': None, 'extra_opts': None, + 'libsdir': None, } @@ -63,7 +64,7 @@ def load_metadata(metafile, defaults=None): defaults = _ensure_defaults(defaults, rootdir) base, basefile = _resolve_base( - tool, + tool.get('metabase'), # XXX Pop it? 
project, filename, project.get('version') or defaults.get('version'), @@ -123,10 +124,9 @@ def _ensure_defaults(defaults, rootdir): return defaults -def _resolve_base(tool, project, filename, version): +def _resolve_base(metabase, project, filename, version): rootdir, basename = os.path.split(filename) - metabase = tool.get('metabase') if not metabase: if basename == 'pyproject.toml': return None, None @@ -216,6 +216,11 @@ def _resolve_value(field, value, rootdir): for opt in value: if not opt or not isinstance(opt, str): raise TypeError(f'extra_opts should be a list of strings, got {value!r}') + elif field == 'libsdir': + value = os.path.normpath( + os.path.join(rootdir, value) + ) + _utils.check_dir(value) else: raise NotImplementedError(field) return value diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py index 9ff23d39..40f2eaf5 100644 --- a/pyperformance/benchmark/_run.py +++ b/pyperformance/benchmark/_run.py @@ -1,3 +1,4 @@ +import os import sys import pyperf @@ -9,6 +10,7 @@ def run_perf_script(python, runscript, *, venv=None, extra_opts=None, pyperf_opts=None, + libsdir=None, verbose=False, ): if not runscript: @@ -22,8 +24,13 @@ def run_perf_script(python, runscript, *, *(extra_opts or ()), *(pyperf_opts or ()), ] + env = dict(os.environ) + if libsdir: + PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep) + PYTHONPATH.insert(0, libsdir) + env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) with _utils.temporary_file() as tmp: cmd.extend(('--output', tmp)) - _utils.run_command(cmd, hide_stderr=not verbose) + _utils.run_command(cmd, env=env, hide_stderr=not verbose) return pyperf.BenchmarkSuite.load(tmp) From 3fb51875692049d676654657d51d6751b13bbfdc Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 19 Jul 2021 20:32:49 -0600 Subject: [PATCH 028/126] Fix an error message. --- pyperformance/benchmarks/_manifest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyperformance/benchmarks/_manifest.py b/pyperformance/benchmarks/_manifest.py index 3bc3af78..f61a300d 100644 --- a/pyperformance/benchmarks/_manifest.py +++ b/pyperformance/benchmarks/_manifest.py @@ -86,7 +86,7 @@ def _parse_benchmarks(lines, resolve, filename): lines = [''] lines = iter(lines) if next(lines) != BENCH_HEADER: - raise ValueError('invalid manifest file, expected benchmarks header') + raise ValueError('invalid manifest file, expected benchmarks table header') localdir = os.path.dirname(filename) From abf56e95c08c7f5acf68b89a76b40e9a5fcf8639 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 19 Jul 2021 20:44:26 -0600 Subject: [PATCH 029/126] Use the default resolve() if the default manifest is explicit. --- pyperformance/benchmarks/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index 93345ff2..c46dd2e9 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -18,7 +18,10 @@ def load_manifest(filename, *, resolve=None): if not filename: filename = DEFAULT_MANIFEST - if resolve is None: + else: + filename = os.path.abspath(filename) + if resolve is None: + if filename == DEFAULT_MANIFEST: def resolve(bench): if isinstance(bench, _benchmark.Benchmark): spec = bench.spec From 4bf223aaefe24b1fcf00bb2f666c397fb244d197 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 19 Jul 2021 21:30:15 -0600 Subject: [PATCH 030/126] Merge in the version properly. 
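Metadata tiers (the benchmark's own file, its metabase, then the defaults)
are merged with earlier tiers taking precedence, and the version is now
taken from the resolved spec rather than being threaded through as a bare
string.  A simplified sketch of the precedence rule (not the exact helper
in the change below):

    def merge_metadata(*tiers):
        # Earlier tiers win; later tiers only fill in fields that are
        # still missing.
        merged = {}
        for data in tiers:
            for field, value in (data or {}).items():
                if field not in merged and value is not None:
                    merged[field] = value
        return merged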
--- pyperformance/benchmark/_metadata.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py index 7e76875b..16e13c1b 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/benchmark/_metadata.py @@ -67,7 +67,7 @@ def load_metadata(metafile, defaults=None): tool.get('metabase'), # XXX Pop it? project, filename, - project.get('version') or defaults.get('version'), + defaults, ) top = _resolve(project or {}, tool, filename) merged = _merge_metadata(top, base, defaults) @@ -124,7 +124,7 @@ def _ensure_defaults(defaults, rootdir): return defaults -def _resolve_base(metabase, project, filename, version): +def _resolve_base(metabase, project, filename, defaults): rootdir, basename = os.path.split(filename) if not metabase: @@ -156,8 +156,9 @@ def _resolve_base(metabase, project, filename, version): metabase = os.path.join(rootdir, metabase) if metabase == filename: raise Exception('circular') - return load_metadata(metabase, - {'version': version, 'name': '_base_'}) + + defaults = dict(defaults, name='_base_') + return load_metadata(metabase, defaults) def _resolve(project, tool, filename): @@ -232,9 +233,12 @@ def _merge_metadata(*tiers): if not data: continue for field, value in data.items(): + if field == 'spec': + field = 'version' + value = value.version if merged.get(field): # XXX Merge containers? continue - if value: + if value or isinstance(value, int): merged[field] = value return merged From 9acbbe48741eda6f4c08977ece1255777df911ed Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 20 Jul 2021 09:32:39 -0600 Subject: [PATCH 031/126] Preserve PYTHONPATH (with libsdir) when invoking pyperf via benchmark scripts. --- pyperformance/benchmark/_run.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py index 40f2eaf5..7d043d77 100644 --- a/pyperformance/benchmark/_run.py +++ b/pyperformance/benchmark/_run.py @@ -26,9 +26,15 @@ def run_perf_script(python, runscript, *, ] env = dict(os.environ) if libsdir: - PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep) - PYTHONPATH.insert(0, libsdir) - env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) + if '--copy-env' in pyperf_opts: + PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep) + PYTHONPATH.insert(0, libsdir) + env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) + else: + env['PYTHONPATH'] = libsdir + cmd.extend([ + '--inherit-environ', 'PYTHONPATH', + ]) with _utils.temporary_file() as tmp: cmd.extend(('--output', tmp)) From d06aa7b0e92aedd650fab0efcbc46e1c1b724068 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 20 Jul 2021 10:24:57 -0600 Subject: [PATCH 032/126] Add a note about using an upstream lib for parsing pyproject.toml. --- pyperformance/_utils/_pyproject_toml.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyperformance/_utils/_pyproject_toml.py b/pyperformance/_utils/_pyproject_toml.py index bb3a654c..cdc0df8e 100644 --- a/pyperformance/_utils/_pyproject_toml.py +++ b/pyperformance/_utils/_pyproject_toml.py @@ -1,3 +1,6 @@ +# This module should be replaced with the equivalent functionality +# in the PyPI "packaging" package (once it's added there). 
+ import os.path import re import urllib.parse From e48d29bfb14a72b917c88dc70865182793485902 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 20 Jul 2021 10:44:51 -0600 Subject: [PATCH 033/126] Use the full benchmark version rather than the canonicalized form. --- pyperformance/_utils/_pyproject_toml.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pyperformance/_utils/_pyproject_toml.py b/pyperformance/_utils/_pyproject_toml.py index cdc0df8e..8bfe6ba0 100644 --- a/pyperformance/_utils/_pyproject_toml.py +++ b/pyperformance/_utils/_pyproject_toml.py @@ -139,9 +139,11 @@ def _normalize_project(data, rootdir, name, requirefiles, **_ignored): if 'version' not in data.get('dynamic', []): raise ValueError('missing required "version" field') else: - # This also validates it. - version = packaging.utils.canonicalize_version(version) - data['version'] = version + # We keep the full version string rather than + # the canonicalized form. However, we still validate and + # (effectively) normalize it. + version = packaging.version.parse(version) + data['version'] = str(version) unused.remove('version') ########## From 31f602109ad19d03909319d46b138ac2b8d59922 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 22 Jul 2021 10:52:30 -0600 Subject: [PATCH 034/126] Add iter_clean_lines() to _utils. --- pyperformance/_utils/__init__.py | 1 + pyperformance/_utils/_misc.py | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py index db4c08fd..cf03b028 100644 --- a/pyperformance/_utils/__init__.py +++ b/pyperformance/_utils/__init__.py @@ -8,6 +8,7 @@ ) from ._misc import ( check_name, + iter_clean_lines, parse_name_pattern, parse_tag_pattern, parse_selections, diff --git a/pyperformance/_utils/_misc.py b/pyperformance/_utils/_misc.py index bef414bd..e73e00a8 100644 --- a/pyperformance/_utils/_misc.py +++ b/pyperformance/_utils/_misc.py @@ -49,3 +49,14 @@ def parse_selections(selections, parse_entry=None): entry = entry[1:] yield parse_entry(op, entry) + + +def iter_clean_lines(filename): + with open(filename) as reqsfile: + for line in reqsfile: + # strip comment + line = line.partition('#')[0] + line = line.rstrip() + if not line: + continue + yield line From 4b136452325af5d259c36d98e9bfa0d6876a5d97 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 22 Jul 2021 11:00:19 -0600 Subject: [PATCH 035/126] Use a run ID when running benchmarks. 
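Each run gets an ID composed of the target Python, a short "compatibility"
hash of the installed requirements, the benchmark name, and a timestamp;
the same name is reused for the per-benchmark virtual environment.  A rough
sketch of how the name is built (mirroring RunID below, with illustrative
inputs):

    def run_name(python_id, compat_id, bench_name=None, timestamp=None):
        # e.g. "<python-id>-compat-<12-char-hash>-bm-json_dumps-1626900000"
        name = f'{python_id}-compat-{compat_id}'
        if bench_name:
            name = f'{name}-bm-{bench_name}'
        if timestamp:
            name = f'{name}-{int(timestamp)}'
        return name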
--- pyperformance/benchmark/_benchmark.py | 20 ++++- pyperformance/benchmark/_run.py | 93 +++++++++++++++----- pyperformance/run.py | 120 ++++++++++++++++++++------ pyperformance/venv.py | 64 +++----------- 4 files changed, 196 insertions(+), 101 deletions(-) diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index a8eaa21f..dc3e4e69 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -1,8 +1,9 @@ import os.path +import sys from ._spec import BenchmarkSpec from ._metadata import load_metadata -from ._run import run_perf_script +from ._run import run_perf_script, run_other_script class Benchmark: @@ -123,13 +124,24 @@ def extra_opts(self): # * dependencies # * requirements - def run(self, python, pyperf_opts=None, *, venv=None, verbose=False): - return run_perf_script( + def run(self, python, runid=None, pyperf_opts=None, *, + venv=None, + verbose=False, + ): + if venv and python == sys.executable: + python = venv.get_python_program() + + if not runid: + from ..run import get_run_id + runid = get_run_id(python, self) + + bench = run_perf_script( python, self.runscript, - venv=venv, + runid, extra_opts=self.extra_opts, pyperf_opts=pyperf_opts, libsdir=self.libsdir, verbose=verbose, ) + return bench diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py index 7d043d77..51af0cac 100644 --- a/pyperformance/benchmark/_run.py +++ b/pyperformance/benchmark/_run.py @@ -1,13 +1,12 @@ +import argparse import os -import sys import pyperf from .. import _utils -def run_perf_script(python, runscript, *, - venv=None, +def run_perf_script(python, runscript, runid, *, extra_opts=None, pyperf_opts=None, libsdir=None, @@ -17,26 +16,76 @@ def run_perf_script(python, runscript, *, raise ValueError('missing runscript') if not isinstance(runscript, str): raise TypeError(f'runscript must be a string, got {runscript!r}') - if venv and python == sys.executable: - python = venv.get_python_program() - cmd = [ - python, '-u', runscript, - *(extra_opts or ()), - *(pyperf_opts or ()), + + with _utils.temporary_file() as tmp: + opts = [ + *(extra_opts or ()), + *(pyperf_opts or ()), + '--output', tmp, + ] + prepargs = [python, runscript, opts, runid, libsdir] + if pyperf_opts and '--copy-env' in pyperf_opts: + argv, env = _prep_basic(*prepargs) + else: + argv, env = _prep_restricted(*prepargs) + + _utils.run_command(argv, env=env, hide_stderr=not verbose) + return pyperf.BenchmarkSuite.load(tmp) + + +def _prep_restricted(python, script, opts_orig, runid, libsdir): + # Deal with --inherit-environ. + FLAG = '--inherit-environ' + opts = [] + idx = None + for i, opt in enumerate(opts_orig): + if opt.startswith(FLAG + '='): + idx = i + 1 + opts.append(FLAG) + opts.append(opt.partition('=')[-2]) + opts.extend(opts_orig[idx:]) + break + elif opt == FLAG: + idx = i + 1 + opts.append(FLAG) + opts.append(opts_orig[idx]) + opts.extend(opts_orig[idx + 1:]) + break + else: + opts.append(opt) + else: + opts.extend(['--inherit-environ', '']) + idx = len(opts) - 1 + inherited = set(opts[idx].replace(',', ' ').split()) + def inherit_env_var(name): + inherited.add(name) + opts[idx] = ','.join(inherited) + + # Track the environment variables. + inherit_env_var('PYPERFORMANCE_RUNID') + if libsdir: + inherit_env_var('PYTHONPATH') + + return _prep_basic(python, script, opts, runid, libsdir) + + +def _prep_basic(python, script, opts, runid, libsdir): + # Build argv. 
+ argv = [ + python, '-u', script, + *(opts or ()), ] + + # Populate the environment variables. env = dict(os.environ) + env['PYPERFORMANCE_RUNID'] = str(runid) if libsdir: - if '--copy-env' in pyperf_opts: - PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep) - PYTHONPATH.insert(0, libsdir) - env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) - else: - env['PYTHONPATH'] = libsdir - cmd.extend([ - '--inherit-environ', 'PYTHONPATH', - ]) + _insert_on_PYTHONPATH(libsdir, env) - with _utils.temporary_file() as tmp: - cmd.extend(('--output', tmp)) - _utils.run_command(cmd, env=env, hide_stderr=not verbose) - return pyperf.BenchmarkSuite.load(tmp) + return argv, env + + +def _insert_on_PYTHONPATH(entry, env): + PYTHONPATH = env.get('PYTHONPATH', '').split(os.pathsep) + PYTHONPATH.insert(0, entry) + env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) diff --git a/pyperformance/run.py b/pyperformance/run.py index dca05b0c..1e3b91cf 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -1,4 +1,7 @@ +from collections import namedtuple +import hashlib import sys +import time import traceback try: import multiprocessing @@ -7,6 +10,7 @@ import pyperf import pyperformance +from . import _utils from . import venv as _venv @@ -14,39 +18,54 @@ class BenchmarkException(Exception): pass -# Utility functions +class RunID(namedtuple('RunID', 'python compat bench timestamp')): -def get_pyperf_opts(options): - opts = [] + def __new__(cls, python, compat, bench, timestamp): + self = super().__new__( + cls, + python, + compat, + bench or None, + int(timestamp) if timestamp else None, + ) + return self - if options.debug_single_value: - opts.append('--debug-single-value') - elif options.rigorous: - opts.append('--rigorous') - elif options.fast: - opts.append('--fast') + def __str__(self): + if not self.timestamp: + return self.name + return f'{self.name}-{self.timestamp}' - if options.verbose: - opts.append('--verbose') + @property + def name(self): + try: + return self._name + except AttributeError: + name = f'{self.python}-compat-{self.compat}' + if self.bench: + name = f'{name}-bm-{self.bench.name}' + self._name = name + return self._name - if options.affinity: - opts.append('--affinity=%s' % options.affinity) - if options.track_memory: - opts.append('--track-memory') - if options.inherit_environ: - opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ)) - return opts +def get_run_id(python, bench=None): + py_id = _utils.get_python_id(python, prefix=True) + compat_id = get_compatibility_id(bench) + ts = time.time() + return RunID(py_id, compat_id, bench, ts) def run_benchmarks(should_run, python, options): to_run = sorted(should_run) - venvs = {} + runid = get_run_id(python) + + benchmarks = {} for bench in to_run: - venv = _venv.VirtualEnvironment(options, bench, usebase=True) + bench_runid = runid._replace(bench=bench) + venv = _venv.VirtualEnvironment(options, bench, runid.name, + usebase=True) venv.create() - venvs[bench] = venv + benchmarks[bench] = (venv, bench_runid) suite = None run_count = str(len(to_run)) @@ -62,12 +81,12 @@ def run_benchmarks(should_run, python, options): def add_bench(dest_suite, obj): if isinstance(obj, pyperf.BenchmarkSuite): - benchmarks = obj + results = obj else: - benchmarks = (obj,) + results = (obj,) version = pyperformance.__version__ - for res in benchmarks: + for res in results: res.update_metadata({'performance_version': version}) if dest_suite is not None: @@ -77,11 +96,13 @@ def add_bench(dest_suite, obj): return dest_suite + bench_venv, 
bench_runid = benchmarks.get(bench) try: result = bench.run( python, + bench_runid, pyperf_opts, - venv=venvs.get(bench), + venv=bench_venv, verbose=options.verbose, ) except Exception as exc: @@ -94,3 +115,52 @@ def add_bench(dest_suite, obj): print() return (suite, errors) + + +# Utility functions + +def get_compatibility_id(bench=None): + # XXX Do not include the pyperformance reqs if a benchmark was provided? + reqs = sorted(_utils.iter_clean_lines(_venv.REQUIREMENTS_FILE)) + if bench: + lockfile = bench.requirements_lockfile + if lockfile and os.path.exists(lockfile): + reqs += sorted(_utils.iter_clean_lines(lockfile)) + + data = [ + # XXX Favor pyperf.__version__ instead? + pyperformance.__version__, + '\n'.join(reqs), + ] + + h = hashlib.sha256() + for value in data: + h.update(value.encode('utf-8')) + compat_id = h.hexdigest() + # XXX Return the whole string? + compat_id = compat_id[:12] + + return compat_id + + +def get_pyperf_opts(options): + opts = [] + + if options.debug_single_value: + opts.append('--debug-single-value') + elif options.rigorous: + opts.append('--rigorous') + elif options.fast: + opts.append('--fast') + + if options.verbose: + opts.append('--verbose') + + if options.affinity: + opts.append('--affinity=%s' % options.affinity) + if options.track_memory: + opts.append('--track-memory') + if options.inherit_environ: + opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ)) + + return opts diff --git a/pyperformance/venv.py b/pyperformance/venv.py index ecee3f05..ea8bc707 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -1,6 +1,6 @@ import errno -import hashlib import os +import os.path import shutil import subprocess import sys @@ -27,17 +27,6 @@ def is_build_dir(): return os.path.exists(os.path.join(root_dir, 'setup.py')) -def iter_clean_lines(filename): - with open(filename) as reqsfile: - for line in reqsfile: - # strip comment - line = line.partition('#')[0] - line = line.rstrip() - if not line: - continue - yield line - - class Requirements(object): def __init__(self, filename, optional): # if pip or setuptools is updated: @@ -65,7 +54,7 @@ def __init__(self, filename, optional): self.optional = [] if os.path.exists(filename): - for line in iter_clean_lines(filename): + for line in _utils.iter_clean_lines(filename): # strip env markers req = line.partition(';')[0] @@ -149,53 +138,29 @@ def download(url, filename): fp.flush() -def get_compatibility_id(bench=None): - # XXX Do not include the pyperformance reqs if a benchmark was provided? - reqs = sorted(iter_clean_lines(REQUIREMENTS_FILE)) - if bench: - lockfile = bench.requirements_lockfile - if lockfile and os.path.exists(lockfile): - reqs += sorted(iter_clean_lines(lockfile)) - - data = [ - # XXX Favor pyperf.__version__ instead? - pyperformance.__version__, - '\n'.join(reqs), - ] - - h = hashlib.sha256() - for value in data: - h.update(value.encode('utf-8')) - compat_id = h.hexdigest() - # XXX Return the whole string? 
- compat_id = compat_id[:12] - - return compat_id - - -def get_run_name(python, bench=None): - py_id = _utils.get_python_id(python, prefix=True) - compat_id = get_compatibility_id(bench) - name = f'{py_id}-compat-{compat_id}' - if bench: - name = f'{name}-bm-{bench.name}' - return name - - class VirtualEnvironment(object): - def __init__(self, options, bench=None, *, usebase=False): + def __init__(self, options, bench=None, name=None, *, usebase=False): python = options.python if usebase: python, _, _ = _utils.inspect_python_install(python) self.options = options - self.bench = bench self.python = python + self.bench = bench + self._name = name or None self._venv_path = options.venv self._pip_program = None self._force_old_pip = False + @property + def name(self): + if self._name is None: + from .run import get_run_id + runid = get_run_id(self.python, self.bench) + self._name = runid.name + return self._name + def get_python_program(self): venv_path = self.get_path() if os.name == "nt": @@ -256,9 +221,8 @@ def get_output_nocheck(self, *cmd): def get_path(self): if not self._venv_path: - venv_name = get_run_name(self.python, self.bench) self._venv_path = os.path.abspath( - os.path.join('venv', venv_name), + os.path.join('venv', self.name), ) return self._venv_path From b045e9ac4af02cd39a0d20db09e22c4831a664eb Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 22 Jul 2021 11:01:41 -0600 Subject: [PATCH 036/126] Finish implementing "pre" and "post" script support. --- pyperformance/benchmark/_benchmark.py | 39 ++++++++++++++++++++++++++- pyperformance/benchmark/_metadata.py | 26 +++++++++++++++++- pyperformance/benchmark/_run.py | 9 +++++++ 3 files changed, 72 insertions(+), 2 deletions(-) diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index dc3e4e69..7765534d 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -110,6 +110,18 @@ def requirements_lockfile(self): def prescript(self): return self._get_metadata_value('prescript', None) + @property + def pre_extra_opts(self): + return self._get_metadata_value('pre_extra_opts', ()) + + @property + def postscript(self): + return self._get_metadata_value('postscript', None) + + @property + def post_extra_opts(self): + return self._get_metadata_value('post_extra_opts', ()) + @property def runscript(self): return self._get_metadata_value('runscript', None) @@ -135,13 +147,38 @@ def run(self, python, runid=None, pyperf_opts=None, *, from ..run import get_run_id runid = get_run_id(python, self) + prescript = self.prescript + runscript = self.runscript + postscript = self.postscript + + if prescript and os.path.exists(prescript): + run_other_script( + python, + prescript, + runid, + extra_opts=self.pre_extra_opts, + libsdir=self.libsdir, + verbose=verbose, + ) + bench = run_perf_script( python, - self.runscript, + runscript, runid, extra_opts=self.extra_opts, pyperf_opts=pyperf_opts, libsdir=self.libsdir, verbose=verbose, ) + + if postscript and os.path.exists(postscript): + run_other_script( + python, + postscript, + runid, + extra_opts=self.post_extra_opts, + libsdir=self.libsdir, + verbose=verbose, + ) + return bench diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py index 16e13c1b..831cdf0c 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/benchmark/_metadata.py @@ -9,6 +9,7 @@ REQUIREMENTS = 'requirements.txt' DATA = 'data' PREP = 'prep_benchmark.py' +CLEANUP = 'cleanup_benchmark.py' RUN = 
'run_benchmark.py' PEP_621_FIELDS = { @@ -26,6 +27,9 @@ 'tags': None, 'datadir': None, 'prescript': None, + 'pre_extra_opts': None, + 'postscript': None, + 'post_extra_opts': None, 'runscript': None, 'extra_opts': None, 'libsdir': None, @@ -42,6 +46,7 @@ # requirements # (from lock file or requirements.txt) # datadir # prescript +# postscript # runscript # extra_opts @@ -116,6 +121,11 @@ def _ensure_defaults(defaults, rootdir): if os.path.isfile(prescript): defaults['prescript'] = prescript + if not defaults.get('postscript'): + postscript = os.path.join(rootdir, CLEANUP) + if os.path.isfile(postscript): + defaults['postscript'] = postscript + if not defaults.get('runscript'): runscript = os.path.join(rootdir, RUN) if os.path.isfile(runscript): @@ -174,6 +184,16 @@ def _resolve(project, tool, filename): value = tool.get(field) if value is not None: resolved[target] = _resolve_value(field, value, rootdir) + if resolved.get('prescript'): + resolved['prescript'] = os.path.join( + os.path.dirname(filename), + resolved['prescript'], + ) + if resolved.get('postscript'): + resolved['postscript'] = os.path.join( + os.path.dirname(filename), + resolved['postscript'], + ) for field, target in PEP_621_FIELDS.items(): if target is None: @@ -207,11 +227,15 @@ def _resolve_value(field, value, rootdir): if not os.path.isabs(value): value = os.path.join(rootdir, value) _utils.check_file(value) + elif field == 'postscript': + if not os.path.isabs(value): + value = os.path.join(rootdir, value) + _utils.check_file(value) elif field == 'runscript': if not os.path.isabs(value): value = os.path.join(rootdir, value) _utils.check_file(value) - elif field == 'extra_opts': + elif field == 'extra_opts' or field.endswith('_extra_opts'): if isinstance(value, str): raise TypeError(f'extra_opts should be a list of strings, got {value!r}') for opt in value: diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py index 51af0cac..dbf29700 100644 --- a/pyperformance/benchmark/_run.py +++ b/pyperformance/benchmark/_run.py @@ -33,6 +33,15 @@ def run_perf_script(python, runscript, runid, *, return pyperf.BenchmarkSuite.load(tmp) +def run_other_script(python, script, runid, *, + extra_opts=None, + libsdir=None, + verbose=False + ): + argv, env = _prep_basic(python, script, extra_opts, runid, libsdir) + _utils.run_command(argv, env=env, hide_stderr=not verbose) + + def _prep_restricted(python, script, opts_orig, runid, libsdir): # Deal with --inherit-environ. FLAG = '--inherit-environ' From 54b8ba0dc77a1f149e2336e0be89c7af9c987b4e Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 22 Jul 2021 11:06:23 -0600 Subject: [PATCH 037/126] Drop "pre" and "post" script support. (It isn't necessary.) 
--- pyperformance/benchmark/_benchmark.py | 38 --------------------------- pyperformance/benchmark/_metadata.py | 38 +-------------------------- 2 files changed, 1 insertion(+), 75 deletions(-) diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index 7765534d..5eb6ae1d 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -106,22 +106,6 @@ def requirements_lockfile(self): self._lockfile = lockfile return self._lockfile - @property - def prescript(self): - return self._get_metadata_value('prescript', None) - - @property - def pre_extra_opts(self): - return self._get_metadata_value('pre_extra_opts', ()) - - @property - def postscript(self): - return self._get_metadata_value('postscript', None) - - @property - def post_extra_opts(self): - return self._get_metadata_value('post_extra_opts', ()) - @property def runscript(self): return self._get_metadata_value('runscript', None) @@ -147,19 +131,7 @@ def run(self, python, runid=None, pyperf_opts=None, *, from ..run import get_run_id runid = get_run_id(python, self) - prescript = self.prescript runscript = self.runscript - postscript = self.postscript - - if prescript and os.path.exists(prescript): - run_other_script( - python, - prescript, - runid, - extra_opts=self.pre_extra_opts, - libsdir=self.libsdir, - verbose=verbose, - ) bench = run_perf_script( python, @@ -171,14 +143,4 @@ def run(self, python, runid=None, pyperf_opts=None, *, verbose=verbose, ) - if postscript and os.path.exists(postscript): - run_other_script( - python, - postscript, - runid, - extra_opts=self.post_extra_opts, - libsdir=self.libsdir, - verbose=verbose, - ) - return bench diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py index 831cdf0c..958f2d0f 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/benchmark/_metadata.py @@ -8,8 +8,6 @@ DEPENDENCIES = 'requirements.in' REQUIREMENTS = 'requirements.txt' DATA = 'data' -PREP = 'prep_benchmark.py' -CLEANUP = 'cleanup_benchmark.py' RUN = 'run_benchmark.py' PEP_621_FIELDS = { @@ -26,10 +24,6 @@ 'name': None, 'tags': None, 'datadir': None, - 'prescript': None, - 'pre_extra_opts': None, - 'postscript': None, - 'post_extra_opts': None, 'runscript': None, 'extra_opts': None, 'libsdir': None, @@ -45,8 +39,6 @@ # dependencies # (from requirements.in) # requirements # (from lock file or requirements.txt) # datadir -# prescript -# postscript # runscript # extra_opts @@ -116,16 +108,6 @@ def _ensure_defaults(defaults, rootdir): if os.path.isdir(datadir): defaults['datadir'] = datadir - if not defaults.get('prescript'): - prescript = os.path.join(rootdir, PREP) - if os.path.isfile(prescript): - defaults['prescript'] = prescript - - if not defaults.get('postscript'): - postscript = os.path.join(rootdir, CLEANUP) - if os.path.isfile(postscript): - defaults['postscript'] = postscript - if not defaults.get('runscript'): runscript = os.path.join(rootdir, RUN) if os.path.isfile(runscript): @@ -184,16 +166,6 @@ def _resolve(project, tool, filename): value = tool.get(field) if value is not None: resolved[target] = _resolve_value(field, value, rootdir) - if resolved.get('prescript'): - resolved['prescript'] = os.path.join( - os.path.dirname(filename), - resolved['prescript'], - ) - if resolved.get('postscript'): - resolved['postscript'] = os.path.join( - os.path.dirname(filename), - resolved['postscript'], - ) for field, target in PEP_621_FIELDS.items(): if target is None: @@ -223,19 +195,11 @@ def 
_resolve_value(field, value, rootdir): if not os.path.isabs(value): value = os.path.join(rootdir, value) _utils.check_dir(value) - elif field == 'prescript': - if not os.path.isabs(value): - value = os.path.join(rootdir, value) - _utils.check_file(value) - elif field == 'postscript': - if not os.path.isabs(value): - value = os.path.join(rootdir, value) - _utils.check_file(value) elif field == 'runscript': if not os.path.isabs(value): value = os.path.join(rootdir, value) _utils.check_file(value) - elif field == 'extra_opts' or field.endswith('_extra_opts'): + elif field == 'extra_opts': if isinstance(value, str): raise TypeError(f'extra_opts should be a list of strings, got {value!r}') for opt in value: From 362bb6edd208889a477d35f9e29f103d80e5478b Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 22 Jul 2021 11:27:26 -0600 Subject: [PATCH 038/126] Use the correct venv name for each benchmark. --- pyperformance/run.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyperformance/run.py b/pyperformance/run.py index 1e3b91cf..112db710 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -62,8 +62,8 @@ def run_benchmarks(should_run, python, options): benchmarks = {} for bench in to_run: bench_runid = runid._replace(bench=bench) - venv = _venv.VirtualEnvironment(options, bench, runid.name, - usebase=True) + venv = _venv.VirtualEnvironment(options, bench, bench_runid.name, + usebase=True) venv.create() benchmarks[bench] = (venv, bench_runid) From e4fc5d47bf910e27eccea1540faad80c9460287f Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 22 Jul 2021 11:31:48 -0600 Subject: [PATCH 039/126] Stop supporting "libsdir" in the benchmark metadata. --- pyperformance/benchmark/_benchmark.py | 6 ------ pyperformance/benchmark/_metadata.py | 6 ------ pyperformance/benchmark/_run.py | 16 +++++----------- 3 files changed, 5 insertions(+), 23 deletions(-) diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py index 5eb6ae1d..9c46ae46 100644 --- a/pyperformance/benchmark/_benchmark.py +++ b/pyperformance/benchmark/_benchmark.py @@ -89,10 +89,6 @@ def tags(self): def datadir(self): return self._get_metadata_value('datadir', None) - @property - def libsdir(self): - return self._get_metadata_value('libsdir', None) - @property def requirements_lockfile(self): try: @@ -132,14 +128,12 @@ def run(self, python, runid=None, pyperf_opts=None, *, runid = get_run_id(python, self) runscript = self.runscript - bench = run_perf_script( python, runscript, runid, extra_opts=self.extra_opts, pyperf_opts=pyperf_opts, - libsdir=self.libsdir, verbose=verbose, ) diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py index 958f2d0f..5afde52d 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/benchmark/_metadata.py @@ -26,7 +26,6 @@ 'datadir': None, 'runscript': None, 'extra_opts': None, - 'libsdir': None, } @@ -205,11 +204,6 @@ def _resolve_value(field, value, rootdir): for opt in value: if not opt or not isinstance(opt, str): raise TypeError(f'extra_opts should be a list of strings, got {value!r}') - elif field == 'libsdir': - value = os.path.normpath( - os.path.join(rootdir, value) - ) - _utils.check_dir(value) else: raise NotImplementedError(field) return value diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py index dbf29700..9d8a5a59 100644 --- a/pyperformance/benchmark/_run.py +++ b/pyperformance/benchmark/_run.py @@ -9,7 +9,6 @@ def run_perf_script(python, runscript, runid, *, 
extra_opts=None, pyperf_opts=None, - libsdir=None, verbose=False, ): if not runscript: @@ -23,7 +22,7 @@ def run_perf_script(python, runscript, runid, *, *(pyperf_opts or ()), '--output', tmp, ] - prepargs = [python, runscript, opts, runid, libsdir] + prepargs = [python, runscript, opts, runid] if pyperf_opts and '--copy-env' in pyperf_opts: argv, env = _prep_basic(*prepargs) else: @@ -35,14 +34,13 @@ def run_perf_script(python, runscript, runid, *, def run_other_script(python, script, runid, *, extra_opts=None, - libsdir=None, verbose=False ): - argv, env = _prep_basic(python, script, extra_opts, runid, libsdir) + argv, env = _prep_basic(python, script, extra_opts, runid) _utils.run_command(argv, env=env, hide_stderr=not verbose) -def _prep_restricted(python, script, opts_orig, runid, libsdir): +def _prep_restricted(python, script, opts_orig, runid): # Deal with --inherit-environ. FLAG = '--inherit-environ' opts = [] @@ -72,13 +70,11 @@ def inherit_env_var(name): # Track the environment variables. inherit_env_var('PYPERFORMANCE_RUNID') - if libsdir: - inherit_env_var('PYTHONPATH') - return _prep_basic(python, script, opts, runid, libsdir) + return _prep_basic(python, script, opts, runid) -def _prep_basic(python, script, opts, runid, libsdir): +def _prep_basic(python, script, opts, runid): # Build argv. argv = [ python, '-u', script, @@ -88,8 +84,6 @@ def _prep_basic(python, script, opts, runid, libsdir): # Populate the environment variables. env = dict(os.environ) env['PYPERFORMANCE_RUNID'] = str(runid) - if libsdir: - _insert_on_PYTHONPATH(libsdir, env) return argv, env From 9816103ff87cd7d2e1be70754afc7626145a66a7 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 22 Jul 2021 12:05:53 -0600 Subject: [PATCH 040/126] Clean up run_perf_script() and related code. --- pyperformance/benchmark/_run.py | 78 +++++++++++++++++---------------- 1 file changed, 40 insertions(+), 38 deletions(-) diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py index 9d8a5a59..4ef7ca30 100644 --- a/pyperformance/benchmark/_run.py +++ b/pyperformance/benchmark/_run.py @@ -1,4 +1,3 @@ -import argparse import os import pyperf @@ -22,13 +21,13 @@ def run_perf_script(python, runscript, runid, *, *(pyperf_opts or ()), '--output', tmp, ] - prepargs = [python, runscript, opts, runid] if pyperf_opts and '--copy-env' in pyperf_opts: - argv, env = _prep_basic(*prepargs) + argv, env = _prep_cmd(python, runscript, opts, runid, NOOP) else: - argv, env = _prep_restricted(*prepargs) - + opts, inherit_envvar = _resolve_restricted_opts(opts) + argv, env = _prep_cmd(python, runscript, opts, runid, inherit_envvar) _utils.run_command(argv, env=env, hide_stderr=not verbose) + return pyperf.BenchmarkSuite.load(tmp) @@ -36,56 +35,59 @@ def run_other_script(python, script, runid, *, extra_opts=None, verbose=False ): - argv, env = _prep_basic(python, script, extra_opts, runid) + argv, env = _prep_cmd(python, script, extra_opts, runid) _utils.run_command(argv, env=env, hide_stderr=not verbose) -def _prep_restricted(python, script, opts_orig, runid): +def _prep_cmd(python, script, opts, runid, on_set_envvar=None): + # Populate the environment variables. + env = dict(os.environ) + def set_envvar(name, value): + env[name] = value + if on_set_envvar is not None: + on_set_envvar(name) + # on_set_envvar() may update "opts" so all calls to set_envvar() + # must happen before building argv. + set_envvar('PYPERFORMANCE_RUNID', str(runid)) + + # Build argv. 
+ argv = [ + python, '-u', script, + *(opts or ()), + ] + + return argv, env + + +def _resolve_restricted_opts(opts): # Deal with --inherit-environ. FLAG = '--inherit-environ' - opts = [] + resolved = [] idx = None - for i, opt in enumerate(opts_orig): + for i, opt in enumerate(opts): if opt.startswith(FLAG + '='): idx = i + 1 - opts.append(FLAG) - opts.append(opt.partition('=')[-2]) - opts.extend(opts_orig[idx:]) + resolved.append(FLAG) + resolved.append(opt.partition('=')[-2]) + resolved.extend(opts[idx:]) break elif opt == FLAG: idx = i + 1 - opts.append(FLAG) - opts.append(opts_orig[idx]) - opts.extend(opts_orig[idx + 1:]) + resolved.append(FLAG) + resolved.append(opts[idx]) + resolved.extend(opts[idx + 1:]) break else: - opts.append(opt) + resolved.append(opt) else: - opts.extend(['--inherit-environ', '']) - idx = len(opts) - 1 - inherited = set(opts[idx].replace(',', ' ').split()) + resolved.extend(['--inherit-environ', '']) + idx = len(resolved) - 1 + inherited = set(resolved[idx].replace(',', ' ').split()) def inherit_env_var(name): inherited.add(name) - opts[idx] = ','.join(inherited) + resolved[idx] = ','.join(inherited) - # Track the environment variables. - inherit_env_var('PYPERFORMANCE_RUNID') - - return _prep_basic(python, script, opts, runid) - - -def _prep_basic(python, script, opts, runid): - # Build argv. - argv = [ - python, '-u', script, - *(opts or ()), - ] - - # Populate the environment variables. - env = dict(os.environ) - env['PYPERFORMANCE_RUNID'] = str(runid) - - return argv, env + return resolved, inherit_env_var def _insert_on_PYTHONPATH(entry, env): From b7f90f3be9ff3fb82d74a4ac311dcf21f00ad8d6 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 30 Sep 2021 08:08:35 -0600 Subject: [PATCH 041/126] Add a missing import. --- pyperformance/cli.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index dbbc6f71..a9bcc83d 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -1,4 +1,5 @@ import argparse +import logging import os.path import sys From 7d42a40574b327987be4b839019833b844007604 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 4 Oct 2021 13:53:27 -0600 Subject: [PATCH 042/126] Drop accidental files. 
--- .../_benchmarks/bm_pyston_aiohttp_requirements.txt | 4 ---- .../_benchmarks/bm_pyston_djangocms_requirements.txt | 9 --------- .../_benchmarks/bm_pyston_flaskblogging_requirements.txt | 5 ----- .../bm_pyston_gevent_bench_hu_requirements.txt | 2 -- .../_benchmarks/bm_pyston_gunicor_requirements.txt | 5 ----- .../_benchmarks/bm_pyston_mypy_requirements.txt | 1 - .../_benchmarks/bm_pyston_pycparser_requirements.txt | 1 - .../_benchmarks/bm_pyston_pylint_requirements.txt | 1 - .../bm_pyston_pytorch_alexnet_infer_requirements.txt | 2 -- .../_benchmarks/bm_pyston_thrift_requirements.txt | 1 - 10 files changed, 31 deletions(-) delete mode 100644 pyperformance/_benchmarks/bm_pyston_aiohttp_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_djangocms_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_flaskblogging_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_gevent_bench_hu_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_gunicor_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_mypy_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_pycparser_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_pylint_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_pytorch_alexnet_infer_requirements.txt delete mode 100644 pyperformance/_benchmarks/bm_pyston_thrift_requirements.txt diff --git a/pyperformance/_benchmarks/bm_pyston_aiohttp_requirements.txt b/pyperformance/_benchmarks/bm_pyston_aiohttp_requirements.txt deleted file mode 100644 index 04470944..00000000 --- a/pyperformance/_benchmarks/bm_pyston_aiohttp_requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -requests -aiohttp -uvloop -django-cms diff --git a/pyperformance/_benchmarks/bm_pyston_djangocms_requirements.txt b/pyperformance/_benchmarks/bm_pyston_djangocms_requirements.txt deleted file mode 100644 index da6f299d..00000000 --- a/pyperformance/_benchmarks/bm_pyston_djangocms_requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -requests -django-cms -djangocms-bootstrap4 -djangocms-installer -djangocms-file -djangocms-googlemap -djangocms-snippet -djangocms-style -djangocms-video diff --git a/pyperformance/_benchmarks/bm_pyston_flaskblogging_requirements.txt b/pyperformance/_benchmarks/bm_pyston_flaskblogging_requirements.txt deleted file mode 100644 index 15a99c7e..00000000 --- a/pyperformance/_benchmarks/bm_pyston_flaskblogging_requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -requests -django-cms -Flask -Flask-Blogging -Flask-Login diff --git a/pyperformance/_benchmarks/bm_pyston_gevent_bench_hu_requirements.txt b/pyperformance/_benchmarks/bm_pyston_gevent_bench_hu_requirements.txt deleted file mode 100644 index 6cebbc56..00000000 --- a/pyperformance/_benchmarks/bm_pyston_gevent_bench_hu_requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -gevent -greenlet diff --git a/pyperformance/_benchmarks/bm_pyston_gunicor_requirements.txt b/pyperformance/_benchmarks/bm_pyston_gunicor_requirements.txt deleted file mode 100644 index 093ad2c7..00000000 --- a/pyperformance/_benchmarks/bm_pyston_gunicor_requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -requests -gunicorn -aiohttp -uvloop -django-cms diff --git a/pyperformance/_benchmarks/bm_pyston_mypy_requirements.txt b/pyperformance/_benchmarks/bm_pyston_mypy_requirements.txt deleted file mode 100644 index f0aa93ac..00000000 --- a/pyperformance/_benchmarks/bm_pyston_mypy_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -mypy diff --git 
a/pyperformance/_benchmarks/bm_pyston_pycparser_requirements.txt b/pyperformance/_benchmarks/bm_pyston_pycparser_requirements.txt deleted file mode 100644 index dc1c9e10..00000000 --- a/pyperformance/_benchmarks/bm_pyston_pycparser_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -pycparser diff --git a/pyperformance/_benchmarks/bm_pyston_pylint_requirements.txt b/pyperformance/_benchmarks/bm_pyston_pylint_requirements.txt deleted file mode 100644 index 7fb0ea15..00000000 --- a/pyperformance/_benchmarks/bm_pyston_pylint_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -pylint diff --git a/pyperformance/_benchmarks/bm_pyston_pytorch_alexnet_infer_requirements.txt b/pyperformance/_benchmarks/bm_pyston_pytorch_alexnet_infer_requirements.txt deleted file mode 100644 index a119e2b5..00000000 --- a/pyperformance/_benchmarks/bm_pyston_pytorch_alexnet_infer_requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -Pillow -torch diff --git a/pyperformance/_benchmarks/bm_pyston_thrift_requirements.txt b/pyperformance/_benchmarks/bm_pyston_thrift_requirements.txt deleted file mode 100644 index 5e6e05d8..00000000 --- a/pyperformance/_benchmarks/bm_pyston_thrift_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -thrift From bb599e9f26a0a832b9aa6ecfc61be607e8222d29 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 4 Oct 2021 16:35:24 -0600 Subject: [PATCH 043/126] Install all dependencies when running tests. --- pyperformance/cli.py | 3 +- pyperformance/venv.py | 101 +++++++++++++++++++++++++++++------------- runtests.py | 2 +- 3 files changed, 72 insertions(+), 34 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index a9bcc83d..a075497c 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -157,6 +157,7 @@ def parse_args(): default=sys.executable) cmd.add_argument("--venv", help="Path to the virtual environment") + filter_opts(cmd) options = parser.parse_args() @@ -219,7 +220,7 @@ def _main(): benchmarks = _select_benchmarks(options.benchmarks, manifest) if options.action == 'venv': - cmd_venv(options) + cmd_venv(options, benchmarks) sys.exit() elif options.action == 'compile': from pyperformance.compile import cmd_compile diff --git a/pyperformance/venv.py b/pyperformance/venv.py index ea8bc707..f9dc4154 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -28,7 +28,22 @@ def is_build_dir(): class Requirements(object): - def __init__(self, filename, optional): + + @classmethod + def from_file(cls, filename, optional=None): + self = cls() + self._add_from_file(filename, optional) + return self + + @classmethod + def from_benchmarks(cls, benchmarks): + self = cls() + for bench in benchmarks or (): + filename = bench.requirements_lockfile + self._add_from_file(filename) + return self + + def __init__(self): # if pip or setuptools is updated: # .github/workflows/main.yml should be updated as well @@ -48,27 +63,42 @@ def __init__(self, filename, optional): ] # requirements - self.req = [] + self.specs = [] # optional requirements - self.optional = [] + self._optional = set() - if os.path.exists(filename): - for line in _utils.iter_clean_lines(filename): - # strip env markers - req = line.partition(';')[0] + def iter_non_optional(self): + for spec in self.specs: + if spec in self._optional: + continue + yield spec - # strip version - req = req.partition('==')[0] - req = req.partition('>=')[0] + def iter_optional(self): + for spec in self.specs: + if spec not in self._optional: + continue + yield spec - if req in optional: - self.optional.append(line) - else: - self.req.append(line) + 
def _add_from_file(self, filename, optional=None): + if not os.path.exists(filename): + return + for line in _utils.iter_clean_lines(filename): + self._add(line, optional) + + def _add(self, line, optional=None): + self.specs.append(line) + if optional: + # strip env markers + req = line.partition(';')[0] + # strip version + req = req.partition('==')[0] + req = req.partition('>=')[0] + if req in optional: + self._optional.add(line) def get(self, name): - for req in self.req: + for req in self.specs: if req.startswith(name): return req return None @@ -140,7 +170,10 @@ def download(url, filename): class VirtualEnvironment(object): - def __init__(self, options, bench=None, name=None, *, usebase=False): + def __init__(self, options, bench=None, name=None, *, + requirements=None, + usebase=False, + ): python = options.python if usebase: python, _, _ = _utils.inspect_python_install(python) @@ -152,6 +185,7 @@ def __init__(self, options, bench=None, name=None, *, usebase=False): self._venv_path = options.venv self._pip_program = None self._force_old_pip = False + self.requirements = requirements @property def name(self): @@ -365,22 +399,23 @@ def exists(self): venv_python = self.get_python_program() return os.path.exists(venv_python) - def _install_req(self): + def _install_reqs(self): pip_program = self.get_pip_program() # parse requirements - basereqs = Requirements(REQUIREMENTS_FILE, ['psutil']) - if self.bench: - reqsfile = self.bench.requirements_lockfile - requirements = Requirements(reqsfile, []) - # Every benchmark must depend on pyperf. - if not requirements.get('pyperf'): - pyperf_req = basereqs.get('pyperf') - if not pyperf_req: - raise NotImplementedError - requirements.req.append(pyperf_req) + basereqs = Requirements.from_file(REQUIREMENTS_FILE, ['psutil']) + if self.requirements: + requirements = self.requirements + elif self.bench: + requirements = Requirements.from_benchmarks([self.bench]) else: requirements = basereqs + # Every benchmark must depend on pyperf. 
+ if not requirements.get('pyperf'): + pyperf_req = basereqs.get('pyperf') + if not pyperf_req: + raise NotImplementedError + requirements.specs.append(pyperf_req) # Upgrade pip cmd = pip_program + ['install', '-U'] @@ -397,11 +432,11 @@ def _install_req(self): # install requirements cmd = pip_program + ['install'] - cmd.extend(requirements.req) + cmd.extend(requirements.iter_non_optional()) self.run_cmd(cmd) # install optional requirements - for req in requirements.optional: + for req in requirements.iter_optional(): cmd = pip_program + ['install', '-U', req] exitcode = self.run_cmd_nocheck(cmd) if exitcode: @@ -435,7 +470,7 @@ def create(self): print("Creating the virtual environment %s" % venv_path) try: self._create_venv() - self._install_req() + self._install_reqs() except: # noqa print() safe_rmtree(venv_path) @@ -461,10 +496,12 @@ def exec_in_virtualenv(options): os.execv(args[0], args) -def cmd_venv(options): +def cmd_venv(options, benchmarks=None): action = options.venv_action - venv = VirtualEnvironment(options) + requirements = Requirements.from_benchmarks(benchmarks) + + venv = VirtualEnvironment(options, requirements=requirements) venv_path = venv.get_path() if action in ('create', 'recreate'): diff --git a/runtests.py b/runtests.py index 34ac702a..d9268d54 100755 --- a/runtests.py +++ b/runtests.py @@ -39,7 +39,7 @@ def run_bench(*cmd): cmd = cmd + ('--venv', venv) run_cmd(cmd) - run_bench(python, script, 'venv', 'create') + run_bench(python, script, 'venv', 'create', '-b', 'all') egg_info = "pyperformance.egg-info" print("Remove directory %s" % egg_info) From df307117236ceb92de5a12b59ace358894155b16 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 4 Oct 2021 16:55:12 -0600 Subject: [PATCH 044/126] Temporarily get tests passing on Windows. --- pyperformance/_utils/_pythoninfo.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyperformance/_utils/_pythoninfo.py b/pyperformance/_utils/_pythoninfo.py index 1b8b98b9..57618fb9 100644 --- a/pyperformance/_utils/_pythoninfo.py +++ b/pyperformance/_utils/_pythoninfo.py @@ -100,6 +100,9 @@ def _inspect_python_install(executable, prefix, base_prefix, platlibdir, if stdlib_dir == expected: bindir = os.path.basename(os.path.dirname(executable)) base_executable = os.path.join(base_prefix, bindir, python_exe) + elif os.name == 'nt': + # XXX This is good enough for now. + base_executable = executable else: raise NotImplementedError(stdlib_dir) else: From 8d563f9df1a99d644d548b13b764f2429bb692fb Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 4 Oct 2021 17:06:16 -0600 Subject: [PATCH 045/126] Ignore stdlib_dir mismatch for now. --- pyperformance/_utils/_pythoninfo.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pyperformance/_utils/_pythoninfo.py b/pyperformance/_utils/_pythoninfo.py index 57618fb9..e503da9e 100644 --- a/pyperformance/_utils/_pythoninfo.py +++ b/pyperformance/_utils/_pythoninfo.py @@ -100,11 +100,10 @@ def _inspect_python_install(executable, prefix, base_prefix, platlibdir, if stdlib_dir == expected: bindir = os.path.basename(os.path.dirname(executable)) base_executable = os.path.join(base_prefix, bindir, python_exe) - elif os.name == 'nt': + else: # XXX This is good enough for now. 
base_executable = executable - else: - raise NotImplementedError(stdlib_dir) + #raise NotImplementedError(stdlib_dir) else: expected = os.path.join(prefix, platlibdir, python) if stdlib_dir == expected: From d05b07c4573e53ea126cfe7a7ca61173971bcf3a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 10:42:33 -0600 Subject: [PATCH 046/126] Move requirements.txt into the data dir. --- MANIFEST.in | 2 ++ pyperformance/__init__.py | 6 ++++++ pyperformance/{ => data-files}/requirements.txt | 0 pyperformance/venv.py | 2 +- pyperformance/requirements.in => requirements.in | 4 ++++ requirements.txt | 1 + setup.py | 2 +- 7 files changed, 15 insertions(+), 2 deletions(-) rename pyperformance/{ => data-files}/requirements.txt (100%) rename pyperformance/requirements.in => requirements.in (83%) create mode 120000 requirements.txt diff --git a/MANIFEST.in b/MANIFEST.in index cc6f5f6b..b0d860e6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,6 +3,7 @@ include COPYING include MANIFEST.in include README.rst include TODO.rst +include requirements.in include requirements.txt include runtests.py include pyperformance @@ -11,6 +12,7 @@ include tox.ini include doc/*.rst doc/images/*.png doc/images/*.jpg include doc/conf.py doc/Makefile doc/make.bat +include pyperformance/data-files/requirements.txt include pyperformance/_benchmarks/MANIFEST include pyperformance/_benchmarks/base.toml recursive-include pyperformance/_benchmarks/bm_*/* * diff --git a/pyperformance/__init__.py b/pyperformance/__init__.py index 81598dbc..9dd60a33 100644 --- a/pyperformance/__init__.py +++ b/pyperformance/__init__.py @@ -1,2 +1,8 @@ +import os.path + + VERSION = (1, 0, 3) __version__ = '.'.join(map(str, VERSION)) + + +DATA_DIR = os.path.join(os.path.dirname(__file__), 'data-files') diff --git a/pyperformance/requirements.txt b/pyperformance/data-files/requirements.txt similarity index 100% rename from pyperformance/requirements.txt rename to pyperformance/data-files/requirements.txt diff --git a/pyperformance/venv.py b/pyperformance/venv.py index f9dc4154..b04d2e48 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -17,7 +17,7 @@ REQ_OLD_SETUPTOOLS = 'setuptools==18.5' PERFORMANCE_ROOT = os.path.realpath(os.path.dirname(__file__)) -REQUIREMENTS_FILE = os.path.join(PERFORMANCE_ROOT, 'requirements.txt') +REQUIREMENTS_FILE = os.path.join(pyperformance.DATA_DIR, 'requirements.txt') def is_build_dir(): diff --git a/pyperformance/requirements.in b/requirements.in similarity index 83% rename from pyperformance/requirements.in rename to requirements.in index 6e95edc7..0d76bbd8 100644 --- a/pyperformance/requirements.in +++ b/requirements.in @@ -1,6 +1,10 @@ # When one of these dependencies is upgraded, the pyperformance major version # should be increased to respect semantic versionning. Comparison between # two pyperformance results of two different major versions is not reliable. +# +# To rebuild requirements.txt: +# +# pip-compile --upgrade -o requirements.txt requirements.in # pyperformance dependencies # -------------------------- diff --git a/requirements.txt b/requirements.txt new file mode 120000 index 00000000..4ce56a00 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +pyperformance/data-files/requirements.txt \ No newline at end of file diff --git a/setup.py b/setup.py index a80d0e42..5570e27e 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # # - python3 -m pip install --user --upgrade pip-tools # - git clean -fdx # remove all untracked files! 
-# - (cd pyperformance; pip-compile --upgrade requirements.in) +# - (pip-compile --upgrade -o requirements.txt requirements.in) # # Prepare a release: # From 701c91061d5874491051fa945169de3ca30db9d0 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 09:56:58 -0600 Subject: [PATCH 047/126] Move the benchmarks to the data dir. --- MANIFEST.in | 6 +++--- pyperformance/benchmarks/__init__.py | 6 ++---- .../{_benchmarks => data-files/benchmarks}/MANIFEST | 0 .../benchmarks}/base.toml | 0 .../benchmarks}/bm_2to3/data/2to3/README.txt | 0 .../benchmarks}/bm_2to3/data/2to3/__init__.py.txt | 0 .../bm_2to3/data/2to3/context_processors.py.txt | 0 .../benchmarks}/bm_2to3/data/2to3/exceptions.py.txt | 0 .../benchmarks}/bm_2to3/data/2to3/mail.py.txt | 0 .../benchmarks}/bm_2to3/data/2to3/paginator.py.txt | 0 .../benchmarks}/bm_2to3/data/2to3/signals.py.txt | 0 .../bm_2to3/data/2to3/template_loader.py.txt | 0 .../bm_2to3/data/2to3/urlresolvers.py.txt | 0 .../benchmarks}/bm_2to3/data/2to3/xheaders.py.txt | 0 .../benchmarks}/bm_2to3/pyproject.toml | 0 .../benchmarks}/bm_2to3/run_benchmark.py | 0 .../benchmarks}/bm_chameleon/pyproject.toml | 0 .../benchmarks}/bm_chameleon/requirements.txt | 0 .../benchmarks}/bm_chameleon/run_benchmark.py | 0 .../benchmarks}/bm_chaos/pyproject.toml | 0 .../benchmarks}/bm_chaos/run_benchmark.py | 0 .../benchmarks}/bm_crypto_pyaes/pyproject.toml | 0 .../benchmarks}/bm_crypto_pyaes/requirements.txt | 0 .../benchmarks}/bm_crypto_pyaes/run_benchmark.py | 0 .../benchmarks}/bm_deltablue/pyproject.toml | 0 .../benchmarks}/bm_deltablue/run_benchmark.py | 0 .../benchmarks}/bm_django_template/pyproject.toml | 0 .../benchmarks}/bm_django_template/requirements.txt | 0 .../benchmarks}/bm_django_template/run_benchmark.py | 0 .../bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG | 0 .../bm_dulwich_log/data/asyncio.git/FETCH_HEAD | 0 .../bm_dulwich_log/data/asyncio.git/HEAD | 0 .../bm_dulwich_log/data/asyncio.git/ORIG_HEAD | 0 .../bm_dulwich_log/data/asyncio.git/config | 0 .../bm_dulwich_log/data/asyncio.git/description | 0 .../data/asyncio.git/hooks/applypatch-msg.sample | 0 .../data/asyncio.git/hooks/commit-msg.sample | 0 .../data/asyncio.git/hooks/post-update.sample | 0 .../data/asyncio.git/hooks/pre-applypatch.sample | 0 .../data/asyncio.git/hooks/pre-commit.sample | 0 .../data/asyncio.git/hooks/pre-push.sample | 0 .../data/asyncio.git/hooks/pre-rebase.sample | 0 .../asyncio.git/hooks/prepare-commit-msg.sample | 0 .../data/asyncio.git/hooks/update.sample | 0 .../bm_dulwich_log/data/asyncio.git/index | Bin .../bm_dulwich_log/data/asyncio.git/info/exclude | 0 .../bm_dulwich_log/data/asyncio.git/info/refs | 0 .../bm_dulwich_log/data/asyncio.git/logs/HEAD | 0 .../data/asyncio.git/logs/refs/heads/master | 0 .../data/asyncio.git/logs/refs/remotes/origin/HEAD | 0 .../logs/refs/remotes/origin/bind_modules | 0 .../asyncio.git/logs/refs/remotes/origin/master | 0 .../logs/refs/remotes/origin/zero_timeout | 0 .../data/asyncio.git/objects/info/packs | 0 ...ack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx | Bin ...ck-7e1b1ace85030071ca314cd565ae038bacc302a4.pack | Bin .../bm_dulwich_log/data/asyncio.git/packed-refs | 0 .../data/asyncio.git/refs/remotes/origin/HEAD | 0 .../benchmarks}/bm_dulwich_log/pyproject.toml | 0 .../benchmarks}/bm_dulwich_log/requirements.txt | 0 .../benchmarks}/bm_dulwich_log/run_benchmark.py | 0 .../benchmarks}/bm_fannkuch/pyproject.toml | 0 .../benchmarks}/bm_fannkuch/run_benchmark.py | 0 .../benchmarks}/bm_float/pyproject.toml | 0 
.../benchmarks}/bm_float/run_benchmark.py | 0 .../benchmarks}/bm_genshi/pyproject.toml | 0 .../benchmarks}/bm_genshi/requirements.txt | 0 .../benchmarks}/bm_genshi/run_benchmark.py | 0 .../benchmarks}/bm_go/pyproject.toml | 0 .../benchmarks}/bm_go/run_benchmark.py | 0 .../benchmarks}/bm_hexiom/pyproject.toml | 0 .../benchmarks}/bm_hexiom/run_benchmark.py | 0 .../benchmarks}/bm_hg_startup/pyproject.toml | 0 .../benchmarks}/bm_hg_startup/requirements.txt | 0 .../benchmarks}/bm_hg_startup/run_benchmark.py | 0 .../benchmarks}/bm_html5lib/data/w3_tr_html5.html | 0 .../benchmarks}/bm_html5lib/pyproject.toml | 0 .../benchmarks}/bm_html5lib/requirements.txt | 0 .../benchmarks}/bm_html5lib/run_benchmark.py | 0 .../benchmarks}/bm_json_dumps/pyproject.toml | 0 .../benchmarks}/bm_json_dumps/run_benchmark.py | 0 .../benchmarks}/bm_json_loads/pyproject.toml | 0 .../benchmarks}/bm_json_loads/run_benchmark.py | 0 .../benchmarks}/bm_logging/pyproject.toml | 0 .../benchmarks}/bm_logging/run_benchmark.py | 0 .../benchmarks}/bm_mako/pyproject.toml | 0 .../benchmarks}/bm_mako/requirements.txt | 0 .../benchmarks}/bm_mako/run_benchmark.py | 0 .../benchmarks}/bm_mdp/pyproject.toml | 0 .../benchmarks}/bm_mdp/run_benchmark.py | 0 .../benchmarks}/bm_meteor_contest/pyproject.toml | 0 .../benchmarks}/bm_meteor_contest/run_benchmark.py | 0 .../benchmarks}/bm_nbody/pyproject.toml | 0 .../benchmarks}/bm_nbody/run_benchmark.py | 0 .../benchmarks}/bm_nqueens/pyproject.toml | 0 .../benchmarks}/bm_nqueens/run_benchmark.py | 0 .../benchmarks}/bm_pathlib/pyproject.toml | 0 .../benchmarks}/bm_pathlib/run_benchmark.py | 0 .../benchmarks}/bm_pickle/bm_pickle_dict.toml | 0 .../benchmarks}/bm_pickle/bm_pickle_list.toml | 0 .../bm_pickle/bm_pickle_pure_python.toml | 0 .../benchmarks}/bm_pickle/bm_unpickle.toml | 0 .../benchmarks}/bm_pickle/bm_unpickle_list.toml | 0 .../bm_pickle/bm_unpickle_pure_python.toml | 0 .../benchmarks}/bm_pickle/pyproject.toml | 0 .../benchmarks}/bm_pickle/run_benchmark.py | 0 .../benchmarks}/bm_pidigits/pyproject.toml | 0 .../benchmarks}/bm_pidigits/run_benchmark.py | 0 .../benchmarks}/bm_pyflate/data/interpreter.tar.bz2 | Bin .../benchmarks}/bm_pyflate/pyproject.toml | 0 .../benchmarks}/bm_pyflate/run_benchmark.py | 0 .../bm_python_startup_no_site.toml | 0 .../benchmarks}/bm_python_startup/pyproject.toml | 0 .../benchmarks}/bm_python_startup/run_benchmark.py | 0 .../benchmarks}/bm_raytrace/pyproject.toml | 0 .../benchmarks}/bm_raytrace/run_benchmark.py | 0 .../benchmarks}/bm_regex_compile/bm_regex_effbot.py | 0 .../benchmarks}/bm_regex_compile/bm_regex_v8.py | 0 .../benchmarks}/bm_regex_compile/pyproject.toml | 0 .../benchmarks}/bm_regex_compile/run_benchmark.py | 0 .../benchmarks}/bm_regex_dna/pyproject.toml | 0 .../benchmarks}/bm_regex_dna/run_benchmark.py | 0 .../benchmarks}/bm_regex_effbot/pyproject.toml | 0 .../benchmarks}/bm_regex_effbot/run_benchmark.py | 0 .../benchmarks}/bm_regex_v8/pyproject.toml | 0 .../benchmarks}/bm_regex_v8/run_benchmark.py | 0 .../benchmarks}/bm_richards/pyproject.toml | 0 .../benchmarks}/bm_richards/run_benchmark.py | 0 .../benchmarks}/bm_scimark/pyproject.toml | 0 .../benchmarks}/bm_scimark/run_benchmark.py | 0 .../benchmarks}/bm_spectral_norm/pyproject.toml | 0 .../benchmarks}/bm_spectral_norm/run_benchmark.py | 0 .../bm_sqlalchemy_declarative/pyproject.toml | 0 .../bm_sqlalchemy_declarative/requirements.txt | 0 .../bm_sqlalchemy_declarative/run_benchmark.py | 0 .../bm_sqlalchemy_imperative/pyproject.toml | 0 .../bm_sqlalchemy_imperative/requirements.txt | 0 
.../bm_sqlalchemy_imperative/run_benchmark.py | 0 .../benchmarks}/bm_sqlite_synth/pyproject.toml | 0 .../benchmarks}/bm_sqlite_synth/run_benchmark.py | 0 .../benchmarks}/bm_sympy/pyproject.toml | 0 .../benchmarks}/bm_sympy/requirements.txt | 0 .../benchmarks}/bm_sympy/run_benchmark.py | 0 .../benchmarks}/bm_telco/data/telco-bench.b | Bin .../benchmarks}/bm_telco/pyproject.toml | 0 .../benchmarks}/bm_telco/run_benchmark.py | 0 .../benchmarks}/bm_tornado_http/pyproject.toml | 0 .../benchmarks}/bm_tornado_http/requirements.txt | 0 .../benchmarks}/bm_tornado_http/run_benchmark.py | 0 .../benchmarks}/bm_unpack_sequence/pyproject.toml | 0 .../benchmarks}/bm_unpack_sequence/run_benchmark.py | 0 .../benchmarks}/bm_xml_etree/pyproject.toml | 0 .../benchmarks}/bm_xml_etree/run_benchmark.py | 0 153 files changed, 5 insertions(+), 7 deletions(-) rename pyperformance/{_benchmarks => data-files/benchmarks}/MANIFEST (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/base.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/README.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/__init__.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/context_processors.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/exceptions.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/mail.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/paginator.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/signals.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/template_loader.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/urlresolvers.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/data/2to3/xheaders.py.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_2to3/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_chameleon/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_chameleon/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_chameleon/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_chaos/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_chaos/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_crypto_pyaes/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_crypto_pyaes/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_crypto_pyaes/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_deltablue/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_deltablue/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_django_template/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_django_template/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_django_template/run_benchmark.py (100%) rename pyperformance/{_benchmarks => 
data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/FETCH_HEAD (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/HEAD (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/ORIG_HEAD (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/config (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/description (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/hooks/update.sample (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/index (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/info/exclude (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/info/refs (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/logs/HEAD (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/objects/info/packs (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/packed-refs (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD (100%) rename pyperformance/{_benchmarks => 
data-files/benchmarks}/bm_dulwich_log/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_dulwich_log/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_fannkuch/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_fannkuch/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_float/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_float/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_genshi/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_genshi/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_genshi/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_go/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_go/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_hexiom/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_hexiom/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_hg_startup/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_hg_startup/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_hg_startup/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_html5lib/data/w3_tr_html5.html (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_html5lib/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_html5lib/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_html5lib/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_json_dumps/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_json_dumps/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_json_loads/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_json_loads/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_logging/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_logging/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_mako/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_mako/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_mako/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_mdp/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_mdp/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_meteor_contest/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_meteor_contest/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_nbody/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_nbody/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_nqueens/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_nqueens/run_benchmark.py 
(100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pathlib/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pathlib/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pickle/bm_pickle_dict.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pickle/bm_pickle_list.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pickle/bm_pickle_pure_python.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pickle/bm_unpickle.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pickle/bm_unpickle_list.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pickle/bm_unpickle_pure_python.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pickle/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pickle/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pidigits/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pidigits/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pyflate/data/interpreter.tar.bz2 (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pyflate/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_pyflate/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_python_startup/bm_python_startup_no_site.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_python_startup/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_python_startup/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_raytrace/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_raytrace/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_compile/bm_regex_effbot.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_compile/bm_regex_v8.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_compile/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_compile/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_dna/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_dna/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_effbot/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_effbot/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_v8/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_regex_v8/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_richards/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_richards/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_scimark/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_scimark/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_spectral_norm/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_spectral_norm/run_benchmark.py (100%) rename 
pyperformance/{_benchmarks => data-files/benchmarks}/bm_sqlalchemy_declarative/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sqlalchemy_declarative/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sqlalchemy_declarative/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sqlalchemy_imperative/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sqlalchemy_imperative/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sqlalchemy_imperative/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sqlite_synth/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sqlite_synth/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sympy/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sympy/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_sympy/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_telco/data/telco-bench.b (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_telco/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_telco/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_tornado_http/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_tornado_http/requirements.txt (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_tornado_http/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_unpack_sequence/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_unpack_sequence/run_benchmark.py (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_xml_etree/pyproject.toml (100%) rename pyperformance/{_benchmarks => data-files/benchmarks}/bm_xml_etree/run_benchmark.py (100%) diff --git a/MANIFEST.in b/MANIFEST.in index b0d860e6..46553807 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -13,6 +13,6 @@ include doc/*.rst doc/images/*.png doc/images/*.jpg include doc/conf.py doc/Makefile doc/make.bat include pyperformance/data-files/requirements.txt -include pyperformance/_benchmarks/MANIFEST -include pyperformance/_benchmarks/base.toml -recursive-include pyperformance/_benchmarks/bm_*/* * +include pyperformance/data-files/benchmarks/MANIFEST +include pyperformance/data-files/benchmarks/base.toml +recursive-include pyperformance/data-files/benchmarks/bm_*/* * diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index c46dd2e9..99d9034c 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -1,6 +1,6 @@ import os.path -from .. import __version__ +from .. import __version__, DATA_DIR from .. import benchmark as _benchmark from . 
import _manifest @@ -9,9 +9,7 @@ from ._selections import parse_selection, iter_selections -DEFAULTS_DIR = os.path.join( - os.path.dirname(os.path.dirname(__file__)), - '_benchmarks') +DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST') diff --git a/pyperformance/_benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST similarity index 100% rename from pyperformance/_benchmarks/MANIFEST rename to pyperformance/data-files/benchmarks/MANIFEST diff --git a/pyperformance/_benchmarks/base.toml b/pyperformance/data-files/benchmarks/base.toml similarity index 100% rename from pyperformance/_benchmarks/base.toml rename to pyperformance/data-files/benchmarks/base.toml diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/README.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/README.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/README.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/README.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/__init__.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/__init__.py.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/__init__.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/__init__.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/context_processors.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/context_processors.py.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/context_processors.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/context_processors.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/exceptions.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/exceptions.py.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/exceptions.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/exceptions.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/mail.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/mail.py.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/mail.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/mail.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/paginator.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/paginator.py.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/paginator.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/paginator.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/signals.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/signals.py.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/signals.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/signals.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/template_loader.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/template_loader.py.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/template_loader.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/template_loader.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/urlresolvers.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/urlresolvers.py.txt similarity index 100% rename from 
pyperformance/_benchmarks/bm_2to3/data/2to3/urlresolvers.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/urlresolvers.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/data/2to3/xheaders.py.txt b/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/xheaders.py.txt similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/data/2to3/xheaders.py.txt rename to pyperformance/data-files/benchmarks/bm_2to3/data/2to3/xheaders.py.txt diff --git a/pyperformance/_benchmarks/bm_2to3/pyproject.toml b/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_2to3/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_2to3/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_2to3/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_2to3/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_chameleon/pyproject.toml b/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_chameleon/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_chameleon/requirements.txt b/pyperformance/data-files/benchmarks/bm_chameleon/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_chameleon/requirements.txt rename to pyperformance/data-files/benchmarks/bm_chameleon/requirements.txt diff --git a/pyperformance/_benchmarks/bm_chameleon/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_chameleon/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_chameleon/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_chameleon/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_chaos/pyproject.toml b/pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_chaos/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_chaos/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_chaos/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_chaos/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_chaos/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_crypto_pyaes/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_crypto_pyaes/requirements.txt b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_crypto_pyaes/requirements.txt rename to pyperformance/data-files/benchmarks/bm_crypto_pyaes/requirements.txt diff --git a/pyperformance/_benchmarks/bm_crypto_pyaes/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_crypto_pyaes/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_crypto_pyaes/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_deltablue/pyproject.toml 
b/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_deltablue/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_deltablue/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_deltablue/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_deltablue/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_deltablue/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_django_template/pyproject.toml b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_django_template/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_django_template/requirements.txt b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_django_template/requirements.txt rename to pyperformance/data-files/benchmarks/bm_django_template/requirements.txt diff --git a/pyperformance/_benchmarks/bm_django_template/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_django_template/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_django_template/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_django_template/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/HEAD similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/HEAD diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/config b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/config similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/config rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/config diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/description b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/description similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/description rename to 
pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/description diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-rebase.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/update.sample 
b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/update.sample similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/hooks/update.sample rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/update.sample diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/index b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/index similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/index rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/index diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/info/refs b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/refs similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/info/refs rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/refs diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout 
b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs diff --git a/pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD b/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD rename to pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD diff --git a/pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml b/pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_dulwich_log/requirements.txt b/pyperformance/data-files/benchmarks/bm_dulwich_log/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/requirements.txt rename to pyperformance/data-files/benchmarks/bm_dulwich_log/requirements.txt diff --git a/pyperformance/_benchmarks/bm_dulwich_log/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_dulwich_log/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_dulwich_log/run_benchmark.py rename to 
pyperformance/data-files/benchmarks/bm_dulwich_log/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_fannkuch/pyproject.toml b/pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_fannkuch/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_fannkuch/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_fannkuch/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_fannkuch/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_fannkuch/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_float/pyproject.toml b/pyperformance/data-files/benchmarks/bm_float/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_float/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_float/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_float/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_float/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_float/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_float/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_genshi/pyproject.toml b/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_genshi/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_genshi/requirements.txt b/pyperformance/data-files/benchmarks/bm_genshi/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_genshi/requirements.txt rename to pyperformance/data-files/benchmarks/bm_genshi/requirements.txt diff --git a/pyperformance/_benchmarks/bm_genshi/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_genshi/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_genshi/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_genshi/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_go/pyproject.toml b/pyperformance/data-files/benchmarks/bm_go/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_go/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_go/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_go/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_go/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_go/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_go/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_hexiom/pyproject.toml b/pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_hexiom/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_hexiom/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_hexiom/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_hexiom/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_hexiom/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_hg_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_hg_startup/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml diff --git 
a/pyperformance/_benchmarks/bm_hg_startup/requirements.txt b/pyperformance/data-files/benchmarks/bm_hg_startup/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_hg_startup/requirements.txt rename to pyperformance/data-files/benchmarks/bm_hg_startup/requirements.txt diff --git a/pyperformance/_benchmarks/bm_hg_startup/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_hg_startup/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_hg_startup/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_hg_startup/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_html5lib/data/w3_tr_html5.html b/pyperformance/data-files/benchmarks/bm_html5lib/data/w3_tr_html5.html similarity index 100% rename from pyperformance/_benchmarks/bm_html5lib/data/w3_tr_html5.html rename to pyperformance/data-files/benchmarks/bm_html5lib/data/w3_tr_html5.html diff --git a/pyperformance/_benchmarks/bm_html5lib/pyproject.toml b/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_html5lib/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_html5lib/requirements.txt b/pyperformance/data-files/benchmarks/bm_html5lib/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_html5lib/requirements.txt rename to pyperformance/data-files/benchmarks/bm_html5lib/requirements.txt diff --git a/pyperformance/_benchmarks/bm_html5lib/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_html5lib/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_html5lib/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_html5lib/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_json_dumps/pyproject.toml b/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_json_dumps/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_json_dumps/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_json_dumps/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_json_dumps/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_json_dumps/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_json_loads/pyproject.toml b/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_json_loads/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_json_loads/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_json_loads/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_json_loads/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_json_loads/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_logging/pyproject.toml b/pyperformance/data-files/benchmarks/bm_logging/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_logging/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_logging/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_logging/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_logging/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_logging/run_benchmark.py rename to 
pyperformance/data-files/benchmarks/bm_logging/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_mako/pyproject.toml b/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_mako/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_mako/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_mako/requirements.txt b/pyperformance/data-files/benchmarks/bm_mako/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_mako/requirements.txt rename to pyperformance/data-files/benchmarks/bm_mako/requirements.txt diff --git a/pyperformance/_benchmarks/bm_mako/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_mako/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_mako/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_mako/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_mdp/pyproject.toml b/pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_mdp/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_mdp/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_mdp/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_mdp/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_mdp/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_meteor_contest/pyproject.toml b/pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_meteor_contest/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_meteor_contest/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_meteor_contest/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_meteor_contest/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_meteor_contest/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_nbody/pyproject.toml b/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_nbody/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_nbody/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_nbody/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_nbody/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_nbody/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_nqueens/pyproject.toml b/pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_nqueens/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_nqueens/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_nqueens/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_nqueens/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_nqueens/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_pathlib/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pathlib/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml diff --git 
a/pyperformance/_benchmarks/bm_pathlib/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_pathlib/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_pathlib/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_pathlib/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_pickle/bm_pickle_dict.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/bm_pickle_dict.toml rename to pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml diff --git a/pyperformance/_benchmarks/bm_pickle/bm_pickle_list.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/bm_pickle_list.toml rename to pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml diff --git a/pyperformance/_benchmarks/bm_pickle/bm_pickle_pure_python.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/bm_pickle_pure_python.toml rename to pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml diff --git a/pyperformance/_benchmarks/bm_pickle/bm_unpickle.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/bm_unpickle.toml rename to pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml diff --git a/pyperformance/_benchmarks/bm_pickle/bm_unpickle_list.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/bm_unpickle_list.toml rename to pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml diff --git a/pyperformance/_benchmarks/bm_pickle/bm_unpickle_pure_python.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/bm_unpickle_pure_python.toml rename to pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml diff --git a/pyperformance/_benchmarks/bm_pickle/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_pickle/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_pickle/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_pickle/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_pickle/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_pidigits/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pidigits/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_pidigits/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_pidigits/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_pidigits/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_pidigits/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_pyflate/data/interpreter.tar.bz2 b/pyperformance/data-files/benchmarks/bm_pyflate/data/interpreter.tar.bz2 similarity index 100% rename from 
pyperformance/_benchmarks/bm_pyflate/data/interpreter.tar.bz2 rename to pyperformance/data-files/benchmarks/bm_pyflate/data/interpreter.tar.bz2 diff --git a/pyperformance/_benchmarks/bm_pyflate/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_pyflate/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_pyflate/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_pyflate/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_pyflate/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_pyflate/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_python_startup/bm_python_startup_no_site.toml b/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml similarity index 100% rename from pyperformance/_benchmarks/bm_python_startup/bm_python_startup_no_site.toml rename to pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml diff --git a/pyperformance/_benchmarks/bm_python_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_python_startup/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_python_startup/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_python_startup/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_python_startup/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_python_startup/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_raytrace/pyproject.toml b/pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_raytrace/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_raytrace/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_raytrace/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_raytrace/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_raytrace/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_regex_compile/bm_regex_effbot.py b/pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_effbot.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_compile/bm_regex_effbot.py rename to pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_effbot.py diff --git a/pyperformance/_benchmarks/bm_regex_compile/bm_regex_v8.py b/pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_v8.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_compile/bm_regex_v8.py rename to pyperformance/data-files/benchmarks/bm_regex_compile/bm_regex_v8.py diff --git a/pyperformance/_benchmarks/bm_regex_compile/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_regex_compile/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_regex_compile/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_regex_compile/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_compile/run_benchmark.py rename to 
pyperformance/data-files/benchmarks/bm_regex_compile/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_regex_dna/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_regex_dna/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_regex_dna/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_regex_dna/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_dna/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_regex_dna/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_regex_effbot/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_regex_effbot/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_regex_effbot/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_regex_effbot/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_effbot/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_regex_effbot/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_regex_v8/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_regex_v8/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_regex_v8/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_regex_v8/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_regex_v8/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_regex_v8/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_richards/pyproject.toml b/pyperformance/data-files/benchmarks/bm_richards/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_richards/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_richards/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_richards/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_richards/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_richards/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_richards/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_scimark/pyproject.toml b/pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_scimark/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_scimark/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_scimark/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_scimark/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_scimark/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_spectral_norm/pyproject.toml b/pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_spectral_norm/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_spectral_norm/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_spectral_norm/run_benchmark.py similarity index 100% rename from 
pyperformance/_benchmarks/bm_spectral_norm/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_spectral_norm/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_declarative/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_declarative/requirements.txt b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_declarative/requirements.txt rename to pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/requirements.txt diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_declarative/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_declarative/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_imperative/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_imperative/requirements.txt b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_imperative/requirements.txt rename to pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/requirements.txt diff --git a/pyperformance/_benchmarks/bm_sqlalchemy_imperative/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlalchemy_imperative/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_sqlite_synth/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_sqlite_synth/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_sqlite_synth/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_sqlite_synth/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_sqlite_synth/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_sqlite_synth/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_sympy/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_sympy/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_sympy/requirements.txt b/pyperformance/data-files/benchmarks/bm_sympy/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_sympy/requirements.txt rename to pyperformance/data-files/benchmarks/bm_sympy/requirements.txt diff --git a/pyperformance/_benchmarks/bm_sympy/run_benchmark.py 
b/pyperformance/data-files/benchmarks/bm_sympy/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_sympy/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_sympy/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_telco/data/telco-bench.b b/pyperformance/data-files/benchmarks/bm_telco/data/telco-bench.b similarity index 100% rename from pyperformance/_benchmarks/bm_telco/data/telco-bench.b rename to pyperformance/data-files/benchmarks/bm_telco/data/telco-bench.b diff --git a/pyperformance/_benchmarks/bm_telco/pyproject.toml b/pyperformance/data-files/benchmarks/bm_telco/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_telco/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_telco/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_telco/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_telco/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_telco/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_telco/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_tornado_http/pyproject.toml b/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_tornado_http/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_tornado_http/requirements.txt b/pyperformance/data-files/benchmarks/bm_tornado_http/requirements.txt similarity index 100% rename from pyperformance/_benchmarks/bm_tornado_http/requirements.txt rename to pyperformance/data-files/benchmarks/bm_tornado_http/requirements.txt diff --git a/pyperformance/_benchmarks/bm_tornado_http/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_tornado_http/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_tornado_http/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_tornado_http/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_unpack_sequence/pyproject.toml b/pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_unpack_sequence/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_unpack_sequence/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_unpack_sequence/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_unpack_sequence/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_unpack_sequence/run_benchmark.py diff --git a/pyperformance/_benchmarks/bm_xml_etree/pyproject.toml b/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml similarity index 100% rename from pyperformance/_benchmarks/bm_xml_etree/pyproject.toml rename to pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml diff --git a/pyperformance/_benchmarks/bm_xml_etree/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_xml_etree/run_benchmark.py similarity index 100% rename from pyperformance/_benchmarks/bm_xml_etree/run_benchmark.py rename to pyperformance/data-files/benchmarks/bm_xml_etree/run_benchmark.py From afafda695236c7f40cb9f587433244bbec79fb5e Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 11:19:16 -0600 Subject: [PATCH 048/126] Move _pythoninfo out of the _utils dir. 
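_pythoninfo.py is a self-contained helper for interrogating a Python
executable (it can also be run directly as a script to dump that info as
JSON), so it does not belong under the _utils package.  Callers now import
it from the package root.  A rough sketch of the intended usage, mirroring
the call sites updated below (illustrative only, not part of the diff):

    import sys
    from pyperformance import _pythoninfo

    python = sys.executable  # any Python executable path works here
    # Unique identifier for the interpreter that will run the benchmarks.
    py_id = _pythoninfo.get_python_id(python, prefix=True)
    # Resolve a venv interpreter back to its base installation.
    base_python, _, _ = _pythoninfo.inspect_python_install(python)
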
--- pyperformance/{_utils => }/_pythoninfo.py | 41 ++++++++++++++++------- pyperformance/_utils/__init__.py | 5 --- pyperformance/run.py | 4 +-- pyperformance/venv.py | 4 +-- 4 files changed, 33 insertions(+), 21 deletions(-) rename pyperformance/{_utils => }/_pythoninfo.py (93%) diff --git a/pyperformance/_utils/_pythoninfo.py b/pyperformance/_pythoninfo.py similarity index 93% rename from pyperformance/_utils/_pythoninfo.py rename to pyperformance/_pythoninfo.py index e503da9e..54b04a54 100644 --- a/pyperformance/_utils/_pythoninfo.py +++ b/pyperformance/_pythoninfo.py @@ -1,3 +1,14 @@ +# A utility library for getting information about a Python executable. +# +# This may be used as a script. + +__all__ = [ + 'get_python_id', + 'get_python_info', + 'inspect_python_install', +] + + import hashlib import json import os @@ -5,18 +16,6 @@ import sys -try: - PLATLIBDIR = sys.platlibdir -except AttributeError: - PLATLIBDIR = 'lib' -STDLIB_DIR = os.path.dirname(os.__file__) -try: - from importlib.util import MAGIC_NUMBER -except ImportError: - import _imp - MAGIC_NUMBER = _imp.get_magic() - - def get_python_id(python=sys.executable, *, prefix=None): """Return a unique (str) identifier for the given Python executable.""" if not python or isinstance(python, str): @@ -78,6 +77,21 @@ def inspect_python_install(python=sys.executable): return _inspect_python_install(**info) +####################################### +# internal implementation + +try: + PLATLIBDIR = sys.platlibdir +except AttributeError: + PLATLIBDIR = 'lib' +STDLIB_DIR = os.path.dirname(os.__file__) +try: + from importlib.util import MAGIC_NUMBER +except ImportError: + import _imp + MAGIC_NUMBER = _imp.get_magic() + + def _inspect_python_install(executable, prefix, base_prefix, platlibdir, stdlib_dir, version_info, **_ignored): is_venv = prefix != base_prefix @@ -136,6 +150,9 @@ def _get_raw_info(): } +####################################### +# use as a script + if __name__ == '__main__': info = _get_raw_info() json.dump(info, sys.stdout, indent=4) diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py index cf03b028..7a05a17d 100644 --- a/pyperformance/_utils/__init__.py +++ b/pyperformance/_utils/__init__.py @@ -24,8 +24,3 @@ parse_pyproject_toml, load_pyproject_toml, ) -from ._pythoninfo import ( - get_python_id, - get_python_info, - inspect_python_install, -) diff --git a/pyperformance/run.py b/pyperformance/run.py index 112db710..c640300e 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -10,7 +10,7 @@ import pyperf import pyperformance -from . import _utils +from . import _utils, _pythoninfo from . import venv as _venv @@ -48,7 +48,7 @@ def name(self): def get_run_id(python, bench=None): - py_id = _utils.get_python_id(python, prefix=True) + py_id = _pythoninfo.get_python_id(python, prefix=True) compat_id = get_compatibility_id(bench) ts = time.time() return RunID(py_id, compat_id, bench, ts) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index b04d2e48..92a744f8 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -9,7 +9,7 @@ from shlex import quote as shell_quote import pyperformance -from . import _utils +from . 
import _utils, _pythoninfo GET_PIP_URL = 'https://bootstrap.pypa.io/get-pip.py' @@ -176,7 +176,7 @@ def __init__(self, options, bench=None, name=None, *, ): python = options.python if usebase: - python, _, _ = _utils.inspect_python_install(python) + python, _, _ = _pythoninfo.inspect_python_install(python) self.options = options self.python = python From cf4679ad0695e71da323fbd8162937af1d67c3d3 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 11:30:42 -0600 Subject: [PATCH 049/126] Move _pyproject_toml out of the _utils dir. --- pyperformance/{_utils => }/_pyproject_toml.py | 100 ++++++++++-------- pyperformance/_utils/__init__.py | 7 -- pyperformance/benchmark/_metadata.py | 17 +-- 3 files changed, 66 insertions(+), 58 deletions(-) rename pyperformance/{_utils => }/_pyproject_toml.py (97%) diff --git a/pyperformance/_utils/_pyproject_toml.py b/pyperformance/_pyproject_toml.py similarity index 97% rename from pyperformance/_utils/_pyproject_toml.py rename to pyperformance/_pyproject_toml.py index 8bfe6ba0..bfde1530 100644 --- a/pyperformance/_utils/_pyproject_toml.py +++ b/pyperformance/_pyproject_toml.py @@ -1,6 +1,15 @@ # This module should be replaced with the equivalent functionality # in the PyPI "packaging" package (once it's added there). +__all__ = [ + 'parse_person', + 'parse_classifier', + 'parse_entry_point', + 'parse_pyproject_toml', + 'load_pyproject_toml', +] + + import os.path import re import urllib.parse @@ -11,7 +20,7 @@ import packaging.version import toml -from ._misc import check_name +from ._utils import check_name NAME_RE = re.compile('^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$', re.IGNORECASE) @@ -39,6 +48,52 @@ def parse_entry_point(text): raise ValueError(f'invalid entry point {text!r}') +def parse_pyproject_toml(text, rootdir, name=None, *, + tools=None, + requirefiles=True, + ): + data = toml.loads(text) + unused = list(data) + + for section, normalize in SECTIONS.items(): + try: + secdata = data[section] + except KeyError: + data[section] = None + else: + data[section] = normalize(secdata, + name=name, + tools=tools, + rootdir=rootdir, + requirefiles=requirefiles, + ) + unused.remove(section) + + if unused: + raise ValueError(f'unsupported sections ({", ".join(sorted(unused))})') + + return data + + +def load_pyproject_toml(filename, *, name=None, tools=None, requirefiles=True): + if os.path.isdir(filename): + rootdir = filename + filename = os.path.join(rootdir, 'pyproject.toml') + else: + rootdir = os.path.dirname(filename) + + with open(filename) as infile: + text = infile.read() + data = parse_pyproject_toml(text, rootdir, name, + tools=tools, + requirefiles=requirefiles, + ) + return data, filename + + +####################################### +# internal implementation + def _check_relfile(relname, rootdir, kind): if os.path.isabs(relname): raise ValuError(f'{relname!r} is absolute, expected relative') @@ -71,49 +126,6 @@ def _check_file_or_text(table, rootdir, requirefiles, extra=None): # XXX Validate it? 
-def load_pyproject_toml(filename, *, name=None, tools=None, requirefiles=True): - if os.path.isdir(filename): - rootdir = filename - filename = os.path.join(rootdir, 'pyproject.toml') - else: - rootdir = os.path.dirname(filename) - - with open(filename) as infile: - text = infile.read() - data = parse_pyproject_toml(text, rootdir, name, - tools=tools, - requirefiles=requirefiles, - ) - return data, filename - - -def parse_pyproject_toml(text, rootdir, name=None, *, - tools=None, - requirefiles=True, - ): - data = toml.loads(text) - unused = list(data) - - for section, normalize in SECTIONS.items(): - try: - secdata = data[section] - except KeyError: - data[section] = None - else: - data[section] = normalize(secdata, - name=name, - tools=tools, - rootdir=rootdir, - requirefiles=requirefiles, - ) - unused.remove(section) - - if unused: - raise ValueError(f'unsupported sections ({", ".join(sorted(unused))})') - - return data - - def _normalize_project(data, rootdir, name, requirefiles, **_ignored): # See PEP 621. unused = set(data) diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py index 7a05a17d..a2358aa6 100644 --- a/pyperformance/_utils/__init__.py +++ b/pyperformance/_utils/__init__.py @@ -17,10 +17,3 @@ MS_WINDOWS, run_command, ) -from ._pyproject_toml import ( - parse_person, - parse_classifier, - parse_entry_point, - parse_pyproject_toml, - load_pyproject_toml, -) diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/benchmark/_metadata.py index 5afde52d..8c5d2bd0 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/benchmark/_metadata.py @@ -1,6 +1,6 @@ import os.path -from .. import _utils +from .. import _utils, _pyproject_toml from ._spec import BenchmarkSpec @@ -45,16 +45,19 @@ def load_metadata(metafile, defaults=None): if isinstance(metafile, str): name, rootdir = _name_from_filename(metafile) - data, filename = _utils.load_pyproject_toml(metafile, - name=name or None, - requirefiles=False, - ) + data, filename = _pyproject_toml.load_pyproject_toml( + metafile, + name=name or None, + requirefiles=False, + ) else: text = metafile.read() filename = metafile.name name, rootdir = _name_from_filename(filename) - data = _utils.parse_pyproject_toml(text, rootdir, name, - requirefiles=False) + data = _pyproject_toml.parse_pyproject_toml( + text, rootdir, name, + requirefiles=False, + ) project = data.get('project') tool = data.get('tool', {}).get('pyperformance', {}) From d3bdaad30830900f52294a66883f516bac687f22 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 12:43:46 -0600 Subject: [PATCH 050/126] Make _utils a single module. 
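After the previous two patches, the _utils package only holds three small
modules (_fs, _misc and _platform), so fold them into a single
pyperformance/_utils.py with an explicit __all__ and section comments.
Callers keep importing the same names from the same place.  For example
(illustrative sketch, not part of the diff):

    import sys
    from pyperformance import _utils

    _utils.check_name('my_benchmark')
    with _utils.temporary_file() as tmp:
        # Run a child process, much as the benchmark scripts are invoked.
        _utils.run_command([sys.executable, '-c', 'pass'])
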
--- pyperformance/_utils.py | 169 ++++++++++++++++++++++++++++++ pyperformance/_utils/__init__.py | 19 ---- pyperformance/_utils/_fs.py | 32 ------ pyperformance/_utils/_misc.py | 62 ----------- pyperformance/_utils/_platform.py | 45 -------- 5 files changed, 169 insertions(+), 158 deletions(-) create mode 100644 pyperformance/_utils.py delete mode 100644 pyperformance/_utils/__init__.py delete mode 100644 pyperformance/_utils/_fs.py delete mode 100644 pyperformance/_utils/_misc.py delete mode 100644 pyperformance/_utils/_platform.py diff --git a/pyperformance/_utils.py b/pyperformance/_utils.py new file mode 100644 index 00000000..e71639dc --- /dev/null +++ b/pyperformance/_utils.py @@ -0,0 +1,169 @@ + +__all__ = [ + # filesystem + 'temporary_file', + 'check_file', + 'check_dir', + # platform + 'MS_WINDOWS', + 'run_command', + # misc + 'check_name', + 'parse_name_pattern', + 'parse_tag_pattern', + 'parse_selections', + 'iter_clean_lines', +] + + +####################################### +# filesystem utils + +import contextlib +import errno +import os +import os.path +import tempfile + + +@contextlib.contextmanager +def temporary_file(): + tmp_filename = tempfile.mktemp() + try: + yield tmp_filename + finally: + try: + os.unlink(tmp_filename) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + + +def check_file(filename): + if not os.path.isabs(filename): + raise ValueError(f'expected absolute path, got {filename!r}') + if not os.path.isfile(filename): + raise ValueError(f'file missing ({filename})') + + +def check_dir(dirname): + if not os.path.isabs(dirname): + raise ValueError(f'expected absolute path, got {dirname!r}') + if not os.path.isdir(dirname): + raise ValueError(f'directory missing ({dirname})') + + +####################################### +# platform utils + +import logging +import subprocess +import sys + + +MS_WINDOWS = (sys.platform == 'win32') + + +def run_command(command, env=None, *, hide_stderr=True): + if hide_stderr: + kw = {'stderr': subprocess.PIPE} + else: + kw = {} + + logging.info("Running `%s`", + " ".join(list(map(str, command)))) + + # Explicitly flush standard streams, required if streams are buffered + # (not TTY) to write lines in the expected order + sys.stdout.flush() + sys.stderr.flush() + + proc = subprocess.Popen(command, + universal_newlines=True, + env=env, + **kw) + try: + stderr = proc.communicate()[1] + except: # noqa + if proc.stderr: + proc.stderr.close() + try: + proc.kill() + except OSError: + # process already exited + pass + proc.wait() + raise + + if proc.returncode != 0: + if hide_stderr: + sys.stderr.flush() + sys.stderr.write(stderr) + sys.stderr.flush() + raise RuntimeError("Benchmark died") + + +####################################### +# misc utils + +def check_name(name, *, loose=False): + if not name or not isinstance(name, str): + raise ValueError(f'bad name {name!r}') + if not loose: + if name.startswith('-'): + raise ValueError(name) + if not name.replace('-', '_').isidentifier(): + raise ValueError(name) + + +def parse_name_pattern(text, *, fail=True): + name = text + # XXX Support globs and/or regexes? (return a callable) + try: + check_name('_' + name) + except Exception: + if fail: + raise # re-raise + return None + return name + + +def parse_tag_pattern(text): + if not text.startswith('<'): + return None + if not text.endswith('>'): + return None + tag = text[1:-1] + # XXX Support globs and/or regexes? 
(return a callable) + check_name(tag) + return tag + + +def parse_selections(selections, parse_entry=None): + if isinstance(selections, str): + selections = selections.split(',') + if parse_entry is None: + parse_entry = (lambda o, e: (o, e, None, e)) + + for entry in selections: + entry = entry.strip() + if not entry: + continue + + op = '+' + if entry.startswith('-'): + op = '-' + entry = entry[1:] + + yield parse_entry(op, entry) + + +def iter_clean_lines(filename): + with open(filename) as reqsfile: + for line in reqsfile: + # strip comment + line = line.partition('#')[0] + line = line.rstrip() + if not line: + continue + yield line diff --git a/pyperformance/_utils/__init__.py b/pyperformance/_utils/__init__.py deleted file mode 100644 index a2358aa6..00000000 --- a/pyperformance/_utils/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ - -######### -# aliases -from ._fs import ( - temporary_file, - check_file, - check_dir, -) -from ._misc import ( - check_name, - iter_clean_lines, - parse_name_pattern, - parse_tag_pattern, - parse_selections, -) -from ._platform import ( - MS_WINDOWS, - run_command, -) diff --git a/pyperformance/_utils/_fs.py b/pyperformance/_utils/_fs.py deleted file mode 100644 index 11bf19bf..00000000 --- a/pyperformance/_utils/_fs.py +++ /dev/null @@ -1,32 +0,0 @@ -import contextlib -import errno -import os -import os.path -import tempfile - - -@contextlib.contextmanager -def temporary_file(): - tmp_filename = tempfile.mktemp() - try: - yield tmp_filename - finally: - try: - os.unlink(tmp_filename) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - - -def check_file(filename): - if not os.path.isabs(filename): - raise ValueError(f'expected absolute path, got {filename!r}') - if not os.path.isfile(filename): - raise ValueError(f'file missing ({filename})') - - -def check_dir(dirname): - if not os.path.isabs(dirname): - raise ValueError(f'expected absolute path, got {dirname!r}') - if not os.path.isdir(dirname): - raise ValueError(f'directory missing ({dirname})') diff --git a/pyperformance/_utils/_misc.py b/pyperformance/_utils/_misc.py deleted file mode 100644 index e73e00a8..00000000 --- a/pyperformance/_utils/_misc.py +++ /dev/null @@ -1,62 +0,0 @@ - -def check_name(name, *, loose=False): - if not name or not isinstance(name, str): - raise ValueError(f'bad name {name!r}') - if not loose: - if name.startswith('-'): - raise ValueError(name) - if not name.replace('-', '_').isidentifier(): - raise ValueError(name) - - -def parse_name_pattern(text, *, fail=True): - name = text - # XXX Support globs and/or regexes? (return a callable) - try: - check_name('_' + name) - except Exception: - if fail: - raise # re-raise - return None - return name - - -def parse_tag_pattern(text): - if not text.startswith('<'): - return None - if not text.endswith('>'): - return None - tag = text[1:-1] - # XXX Support globs and/or regexes? 
(return a callable) - check_name(tag) - return tag - - -def parse_selections(selections, parse_entry=None): - if isinstance(selections, str): - selections = selections.split(',') - if parse_entry is None: - parse_entry = (lambda o, e: (o, e, None, e)) - - for entry in selections: - entry = entry.strip() - if not entry: - continue - - op = '+' - if entry.startswith('-'): - op = '-' - entry = entry[1:] - - yield parse_entry(op, entry) - - -def iter_clean_lines(filename): - with open(filename) as reqsfile: - for line in reqsfile: - # strip comment - line = line.partition('#')[0] - line = line.rstrip() - if not line: - continue - yield line diff --git a/pyperformance/_utils/_platform.py b/pyperformance/_utils/_platform.py deleted file mode 100644 index 25eb0242..00000000 --- a/pyperformance/_utils/_platform.py +++ /dev/null @@ -1,45 +0,0 @@ -import logging -import subprocess -import sys - - -MS_WINDOWS = (sys.platform == 'win32') - - -def run_command(command, env=None, *, hide_stderr=True): - if hide_stderr: - kw = {'stderr': subprocess.PIPE} - else: - kw = {} - - logging.info("Running `%s`", - " ".join(list(map(str, command)))) - - # Explicitly flush standard streams, required if streams are buffered - # (not TTY) to write lines in the expected order - sys.stdout.flush() - sys.stderr.flush() - - proc = subprocess.Popen(command, - universal_newlines=True, - env=env, - **kw) - try: - stderr = proc.communicate()[1] - except: # noqa - if proc.stderr: - proc.stderr.close() - try: - proc.kill() - except OSError: - # process already exited - pass - proc.wait() - raise - - if proc.returncode != 0: - if hide_stderr: - sys.stderr.flush() - sys.stderr.write(stderr) - sys.stderr.flush() - raise RuntimeError("Benchmark died") From 8f0b3e3c7051f580ca8851629ac539422967a847 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 13:05:08 -0600 Subject: [PATCH 051/126] Move benchmarks.* up to the top level. --- ...selections.py => _benchmark_selections.py} | 29 +++++++---- pyperformance/{benchmarks => }/_manifest.py | 51 ++++++++++++++++++- pyperformance/benchmarks/__init__.py | 44 ---------------- pyperformance/cli.py | 8 +-- 4 files changed, 73 insertions(+), 59 deletions(-) rename pyperformance/{benchmarks/_selections.py => _benchmark_selections.py} (83%) rename pyperformance/{benchmarks => }/_manifest.py (75%) delete mode 100644 pyperformance/benchmarks/__init__.py diff --git a/pyperformance/benchmarks/_selections.py b/pyperformance/_benchmark_selections.py similarity index 83% rename from pyperformance/benchmarks/_selections.py rename to pyperformance/_benchmark_selections.py index 5e15685d..d628450e 100644 --- a/pyperformance/benchmarks/_selections.py +++ b/pyperformance/_benchmark_selections.py @@ -1,6 +1,12 @@ -from .._utils import check_name, parse_name_pattern, parse_tag_pattern -from ..benchmark import parse_benchmark, Benchmark -from ._manifest import expand_benchmark_groups + +__all__ = [ + 'parse_selection', + 'iter_selections', +] + + +from . import _utils, _manifest +from . 
import benchmark as _benchmark def parse_selection(selection, *, op=None): @@ -10,25 +16,25 @@ def parse_selection(selection, *, op=None): # * a benchmark pattern # * a tag # * a tag pattern - parsed = parse_benchmark(selection, fail=False) + parsed = _benchmark.parse_benchmark(selection, fail=False) spec, metafile = parsed if parsed else (None, None) if parsed and spec.version: kind = 'benchmark' spec, metafile = parsed if metafile: - parsed = Benchmark(spec, metafile) + parsed = _benchmark.Benchmark(spec, metafile) else: parsed = spec elif parsed and (spec.origin or metafile): raise NotImplementedError(selection) else: - parsed = parse_tag_pattern(selection) + parsed = _utils.parse_tag_pattern(selection) if parsed: kind = 'tag' else: kind = 'name' - parsed = parse_name_pattern(selection, fail=True) -# parsed = parse_name_pattern(selection, fail=False) + parsed = _utils.parse_name_pattern(selection, fail=True) +# parsed = _utils.parse_name_pattern(selection, fail=False) if not parsed: raise ValueError(f'unsupported selection {selection!r}') return op or '+', selection, kind, parsed @@ -61,6 +67,9 @@ def iter_selections(manifest, selections, *, unique=True): yield bench +####################################### +# internal implementation + def _match_selection(manifest, kind, parsed, byname): if kind == 'benchmark': bench = parsed @@ -89,7 +98,7 @@ def _match_selection(manifest, kind, parsed, byname): else: raise ValueError(f'unsupported selection {parsed!r}') for group in groups: - yield from expand_benchmark_groups(group, manifest.groups) + yield from _manifest.expand_benchmark_groups(group, manifest.groups) elif kind == 'name': if callable(parsed): match_bench = parsed @@ -104,7 +113,7 @@ def _match_selection(manifest, kind, parsed, byname): elif name in manifest.groups: yield from _match_selection(manifest, 'tag', name, byname) else: - check_name(name) + _utils.check_name(name) # No match! The caller can handle this as they like. yield name else: diff --git a/pyperformance/benchmarks/_manifest.py b/pyperformance/_manifest.py similarity index 75% rename from pyperformance/benchmarks/_manifest.py rename to pyperformance/_manifest.py index f61a300d..25d34ab9 100644 --- a/pyperformance/benchmarks/_manifest.py +++ b/pyperformance/_manifest.py @@ -1,8 +1,23 @@ + +__all__ = [ + 'BenchmarksManifest', + 'load_manifest', + 'parse_manifest', + 'expand_benchmark_groups', +] + + from collections import namedtuple import os.path -from .. import benchmark as _benchmark, _utils +from . import __version__, DATA_DIR +from . import _manifest +from . 
import benchmark as _benchmark + + +DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') +DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST') BENCH_COLUMNS = ('name', 'version', 'origin', 'metafile') BENCH_HEADER = '\t'.join(BENCH_COLUMNS) @@ -11,6 +26,37 @@ BenchmarksManifest = namedtuple('BenchmarksManifest', 'benchmarks groups') +def load_manifest(filename, *, resolve=None): + if not filename: + filename = DEFAULT_MANIFEST + else: + filename = os.path.abspath(filename) + if resolve is None: + if filename == DEFAULT_MANIFEST: + def resolve(bench): + if isinstance(bench, _benchmark.Benchmark): + spec = bench.spec + else: + spec = bench + bench = _benchmark.Benchmark(spec, '') + bench.metafile = None + + if not spec.version: + spec = spec._replace(version=__version__) + if not spec.origin: + spec = spec._replace(origin='') + bench.spec = spec + + if not bench.metafile: + metafile = os.path.join(DEFAULTS_DIR, + f'bm_{bench.name}', + 'pyproject.toml') + bench.metafile = metafile + return bench + with open(filename) as infile: + return _manifest.parse_manifest(infile, resolve=resolve, filename=filename) + + def parse_manifest(text, *, resolve=None, filename=None): if isinstance(text, str): lines = text.splitlines() @@ -57,6 +103,9 @@ def expand_benchmark_groups(bench, groups): yield from expand_benchmark_groups(bench, groups) +####################################### +# internal implementation + def _iter_sections(lines): lines = (line.split('#')[0].strip() for line in lines) diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py deleted file mode 100644 index 99d9034c..00000000 --- a/pyperformance/benchmarks/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -import os.path - -from .. import __version__, DATA_DIR -from .. import benchmark as _benchmark -from . import _manifest - -# aliases -from ._manifest import expand_benchmark_groups -from ._selections import parse_selection, iter_selections - - -DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') -DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST') - - -def load_manifest(filename, *, resolve=None): - if not filename: - filename = DEFAULT_MANIFEST - else: - filename = os.path.abspath(filename) - if resolve is None: - if filename == DEFAULT_MANIFEST: - def resolve(bench): - if isinstance(bench, _benchmark.Benchmark): - spec = bench.spec - else: - spec = bench - bench = _benchmark.Benchmark(spec, '') - bench.metafile = None - - if not spec.version: - spec = spec._replace(version=__version__) - if not spec.origin: - spec = spec._replace(origin='') - bench.spec = spec - - if not bench.metafile: - metafile = os.path.join(DEFAULTS_DIR, - f'bm_{bench.name}', - 'pyproject.toml') - bench.metafile = metafile - return bench - with open(filename) as infile: - return _manifest.parse_manifest(infile, resolve=resolve, filename=filename) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index a075497c..a23438a5 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -3,7 +3,7 @@ import os.path import sys -from pyperformance import _utils, benchmarks as _benchmarks +from pyperformance import _utils, _manifest, _benchmark_selections from pyperformance.venv import exec_in_virtualenv, cmd_venv @@ -186,7 +186,7 @@ def parse_args(): def _select_benchmarks(raw, manifest): # Get the raw list of benchmarks. 
entries = raw.lower() - parse_entry = (lambda o, s: _benchmarks.parse_selection(s, op=o)) + parse_entry = (lambda o, s: _benchmark_selections.parse_selection(s, op=o)) parsed = _utils.parse_selections(entries, parse_entry) parsed_infos = list(parsed) @@ -200,7 +200,7 @@ def _select_benchmarks(raw, manifest): # Get the selections. selected = [] - for bench in _benchmarks.iter_selections(manifest, parsed_infos): + for bench in _benchmark_selections.iter_selections(manifest, parsed_infos): if isinstance(bench, str): logging.warning(f"no benchmark named {bench!r}") continue @@ -213,7 +213,7 @@ def _main(): if hasattr(options, 'manifest'): # Load and update the manifest. - manifest = _benchmarks.load_manifest(options.manifest) + manifest = _manifest.load_manifest(options.manifest) if 'all' not in manifest.groups: manifest.groups['all'] = list(manifest.benchmarks) if hasattr(options, 'benchmarks'): From 3ce65a633142f2bbf82097d2f1f7c8f3caa79e05 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 13:24:25 -0600 Subject: [PATCH 052/126] Move benchmark.* up to the top level. --- pyperformance/_benchmark.py | 272 ++++++++++++++++++ .../_metadata.py => _benchmark_metadata.py} | 15 +- pyperformance/_benchmark_selections.py | 3 +- pyperformance/_manifest.py | 3 +- pyperformance/benchmark/__init__.py | 4 - pyperformance/benchmark/_benchmark.py | 140 --------- pyperformance/benchmark/_run.py | 96 ------- pyperformance/benchmark/_spec.py | 35 --- 8 files changed, 286 insertions(+), 282 deletions(-) create mode 100644 pyperformance/_benchmark.py rename pyperformance/{benchmark/_metadata.py => _benchmark_metadata.py} (96%) delete mode 100644 pyperformance/benchmark/__init__.py delete mode 100644 pyperformance/benchmark/_benchmark.py delete mode 100644 pyperformance/benchmark/_run.py delete mode 100644 pyperformance/benchmark/_spec.py diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py new file mode 100644 index 00000000..e2b05c14 --- /dev/null +++ b/pyperformance/_benchmark.py @@ -0,0 +1,272 @@ + +__all__ = [ + 'BenchmarkSpec', + 'Benchmark' + 'check_name', + 'parse_benchmark', +] + + +from collections import namedtuple +import os +import os.path +import sys + +import pyperf + +from . 
import _utils, _benchmark_metadata + + +def check_name(name): + _utils.check_name('_' + name) + + +def parse_benchmark(entry, *, fail=True): + name = entry + version = None + origin = None + metafile = None + + if not f'_{name}'.isidentifier(): + if not fail: + return None + raise ValueError(f'unsupported benchmark name in {entry!r}') + + bench = BenchmarkSpec(name, version, origin) + return bench, metafile + + +class BenchmarkSpec(namedtuple('BenchmarkSpec', 'name version origin')): + __slots__ = () + + @classmethod + def from_raw(cls, raw): + if isinstance(raw, BenchmarkSpec): + return raw, None + elif isinstance(raw, str): + return parse_benchmark(raw) + else: + raise ValueError(f'unsupported raw spec {raw!r}') + + +class Benchmark: + + _metadata = None + + def __init__(self, spec, metafile): + spec, _metafile = BenchmarkSpec.from_raw(spec) + if not metafile: + if not _metafile: + raise ValueError(f'missing metafile for {spec!r}') + metafile = _metafile + + self.spec = spec + self.metafile = metafile + + def __repr__(self): + return f'{type(self).__name__}(spec={self.spec}, metafile={self.metafile})' + + def __hash__(self): + return hash(self.spec) + + def __eq__(self, other): + try: + other_spec = other.spec + except AttributeError: + return NotImplemented + return self.spec == other_spec + + def __gt__(self, other): + try: + other_spec = other.spec + except AttributeError: + return NotImplemented + return self.spec > other_spec + + # __getattr__() gets weird when AttributeError comes out of + # properties so we spell out all the aliased attributes. + + @property + def name(self): + return self.spec.name + + @property + def version(self): + return self.spec.version + + @property + def origin(self): + return self.spec.origin + + def _get_rootdir(self): + try: + return self._rootdir + except AttributeError: + script = self.runscript + self._rootdir = os.path.dirname(script) if script else None + return self._rootdir + + def _init_metadata(self): + if self._metadata is not None: + raise NotImplementedError + + def _get_metadata_value(self, key, default): + try: + return self._metadata[key] + except TypeError: + if self._metadata is None: + defaults = { + 'name': self.name, + 'version': self.version, + } + self._metadata, _ = _benchmark_metadata.load_metadata( + self.metafile, + defaults, + ) + except KeyError: + pass + return self._metadata.setdefault(key, default) + + @property + def tags(self): + return self._get_metadata_value('tags', ()) + + @property + def datadir(self): + return self._get_metadata_value('datadir', None) + + @property + def requirements_lockfile(self): + try: + return self._lockfile + except AttributeError: + lockfile = self._get_metadata_value('requirements_lockfile', None) + if not lockfile: + rootdir = self._get_rootdir() + if rootdir: + lockfile = os.path.join(rootdir, 'requirements.txt') + self._lockfile = lockfile + return self._lockfile + + @property + def runscript(self): + return self._get_metadata_value('runscript', None) + + @property + def extra_opts(self): + return self._get_metadata_value('extra_opts', ()) + + # Other metadata keys: + # * base + # * python + # * dependencies + # * requirements + + def run(self, python, runid=None, pyperf_opts=None, *, + venv=None, + verbose=False, + ): + if venv and python == sys.executable: + python = venv.get_python_program() + + if not runid: + from ..run import get_run_id + runid = get_run_id(python, self) + + runscript = self.runscript + bench = _run_perf_script( + python, + runscript, + runid, + 
extra_opts=self.extra_opts, + pyperf_opts=pyperf_opts, + verbose=verbose, + ) + + return bench + + +####################################### +# internal implementation + +def _run_perf_script(python, runscript, runid, *, + extra_opts=None, + pyperf_opts=None, + verbose=False, + ): + if not runscript: + raise ValueError('missing runscript') + if not isinstance(runscript, str): + raise TypeError(f'runscript must be a string, got {runscript!r}') + + with _utils.temporary_file() as tmp: + opts = [ + *(extra_opts or ()), + *(pyperf_opts or ()), + '--output', tmp, + ] + if pyperf_opts and '--copy-env' in pyperf_opts: + argv, env = _prep_cmd(python, runscript, opts, runid, NOOP) + else: + opts, inherit_envvar = _resolve_restricted_opts(opts) + argv, env = _prep_cmd(python, runscript, opts, runid, inherit_envvar) + _utils.run_command(argv, env=env, hide_stderr=not verbose) + + return pyperf.BenchmarkSuite.load(tmp) + + +def _prep_cmd(python, script, opts, runid, on_set_envvar=None): + # Populate the environment variables. + env = dict(os.environ) + def set_envvar(name, value): + env[name] = value + if on_set_envvar is not None: + on_set_envvar(name) + # on_set_envvar() may update "opts" so all calls to set_envvar() + # must happen before building argv. + set_envvar('PYPERFORMANCE_RUNID', str(runid)) + + # Build argv. + argv = [ + python, '-u', script, + *(opts or ()), + ] + + return argv, env + + +def _resolve_restricted_opts(opts): + # Deal with --inherit-environ. + FLAG = '--inherit-environ' + resolved = [] + idx = None + for i, opt in enumerate(opts): + if opt.startswith(FLAG + '='): + idx = i + 1 + resolved.append(FLAG) + resolved.append(opt.partition('=')[-2]) + resolved.extend(opts[idx:]) + break + elif opt == FLAG: + idx = i + 1 + resolved.append(FLAG) + resolved.append(opts[idx]) + resolved.extend(opts[idx + 1:]) + break + else: + resolved.append(opt) + else: + resolved.extend(['--inherit-environ', '']) + idx = len(resolved) - 1 + inherited = set(resolved[idx].replace(',', ' ').split()) + def inherit_env_var(name): + inherited.add(name) + resolved[idx] = ','.join(inherited) + + return resolved, inherit_env_var + + +def _insert_on_PYTHONPATH(entry, env): + PYTHONPATH = env.get('PYTHONPATH', '').split(os.pathsep) + PYTHONPATH.insert(0, entry) + env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) diff --git a/pyperformance/benchmark/_metadata.py b/pyperformance/_benchmark_metadata.py similarity index 96% rename from pyperformance/benchmark/_metadata.py rename to pyperformance/_benchmark_metadata.py index 8c5d2bd0..e39b867e 100644 --- a/pyperformance/benchmark/_metadata.py +++ b/pyperformance/_benchmark_metadata.py @@ -1,7 +1,13 @@ + +__all__ = [ + 'load_metadata', +] + + import os.path -from .. import _utils, _pyproject_toml -from ._spec import BenchmarkSpec +from . import _utils, _pyproject_toml +from . import _benchmark METADATA = 'pyproject.toml' @@ -77,7 +83,7 @@ def load_metadata(metafile, defaults=None): raise ValueError('missing benchmark version') metafile = merged.pop('metafile') - merged['spec'] = BenchmarkSpec( + merged['spec'] = _benchmark.BenchmarkSpec( merged.pop('name'), merged.pop('version'), # XXX Should we leave this (origin) blank? 
@@ -89,6 +95,9 @@ def load_metadata(metafile, defaults=None): return merged, filename +####################################### +# internal implementation + def _name_from_filename(metafile): rootdir, basename = os.path.split(metafile) if basename == 'pyproject.toml': diff --git a/pyperformance/_benchmark_selections.py b/pyperformance/_benchmark_selections.py index d628450e..c788641e 100644 --- a/pyperformance/_benchmark_selections.py +++ b/pyperformance/_benchmark_selections.py @@ -5,8 +5,7 @@ ] -from . import _utils, _manifest -from . import benchmark as _benchmark +from . import _utils, _manifest, _benchmark def parse_selection(selection, *, op=None): diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 25d34ab9..5883618a 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -12,8 +12,7 @@ from . import __version__, DATA_DIR -from . import _manifest -from . import benchmark as _benchmark +from . import _manifest, _benchmark DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') diff --git a/pyperformance/benchmark/__init__.py b/pyperformance/benchmark/__init__.py deleted file mode 100644 index d9797bab..00000000 --- a/pyperformance/benchmark/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ - -# aliases -from ._spec import BenchmarkSpec, parse_benchmark, check_name -from ._benchmark import Benchmark diff --git a/pyperformance/benchmark/_benchmark.py b/pyperformance/benchmark/_benchmark.py deleted file mode 100644 index 9c46ae46..00000000 --- a/pyperformance/benchmark/_benchmark.py +++ /dev/null @@ -1,140 +0,0 @@ -import os.path -import sys - -from ._spec import BenchmarkSpec -from ._metadata import load_metadata -from ._run import run_perf_script, run_other_script - - -class Benchmark: - - _metadata = None - - def __init__(self, spec, metafile): - spec, _metafile = BenchmarkSpec.from_raw(spec) - if not metafile: - if not _metafile: - raise ValueError(f'missing metafile for {spec!r}') - metafile = _metafile - - self.spec = spec - self.metafile = metafile - - def __repr__(self): - return f'{type(self).__name__}(spec={self.spec}, metafile={self.metafile})' - - def __hash__(self): - return hash(self.spec) - - def __eq__(self, other): - try: - other_spec = other.spec - except AttributeError: - return NotImplemented - return self.spec == other_spec - - def __gt__(self, other): - try: - other_spec = other.spec - except AttributeError: - return NotImplemented - return self.spec > other_spec - - # __getattr__() gets weird when AttributeError comes out of - # properties so we spell out all the aliased attributes. 
- - @property - def name(self): - return self.spec.name - - @property - def version(self): - return self.spec.version - - @property - def origin(self): - return self.spec.origin - - def _get_rootdir(self): - try: - return self._rootdir - except AttributeError: - script = self.runscript - self._rootdir = os.path.dirname(script) if script else None - return self._rootdir - - def _init_metadata(self): - if self._metadata is not None: - raise NotImplementedError - - def _get_metadata_value(self, key, default): - try: - return self._metadata[key] - except TypeError: - if self._metadata is None: - defaults = { - 'name': self.name, - 'version': self.version, - } - self._metadata, _ = load_metadata(self.metafile, defaults) - except KeyError: - pass - return self._metadata.setdefault(key, default) - - @property - def tags(self): - return self._get_metadata_value('tags', ()) - - @property - def datadir(self): - return self._get_metadata_value('datadir', None) - - @property - def requirements_lockfile(self): - try: - return self._lockfile - except AttributeError: - lockfile = self._get_metadata_value('requirements_lockfile', None) - if not lockfile: - rootdir = self._get_rootdir() - if rootdir: - lockfile = os.path.join(rootdir, 'requirements.txt') - self._lockfile = lockfile - return self._lockfile - - @property - def runscript(self): - return self._get_metadata_value('runscript', None) - - @property - def extra_opts(self): - return self._get_metadata_value('extra_opts', ()) - - # Other metadata keys: - # * base - # * python - # * dependencies - # * requirements - - def run(self, python, runid=None, pyperf_opts=None, *, - venv=None, - verbose=False, - ): - if venv and python == sys.executable: - python = venv.get_python_program() - - if not runid: - from ..run import get_run_id - runid = get_run_id(python, self) - - runscript = self.runscript - bench = run_perf_script( - python, - runscript, - runid, - extra_opts=self.extra_opts, - pyperf_opts=pyperf_opts, - verbose=verbose, - ) - - return bench diff --git a/pyperformance/benchmark/_run.py b/pyperformance/benchmark/_run.py deleted file mode 100644 index 4ef7ca30..00000000 --- a/pyperformance/benchmark/_run.py +++ /dev/null @@ -1,96 +0,0 @@ -import os - -import pyperf - -from .. import _utils - - -def run_perf_script(python, runscript, runid, *, - extra_opts=None, - pyperf_opts=None, - verbose=False, - ): - if not runscript: - raise ValueError('missing runscript') - if not isinstance(runscript, str): - raise TypeError(f'runscript must be a string, got {runscript!r}') - - with _utils.temporary_file() as tmp: - opts = [ - *(extra_opts or ()), - *(pyperf_opts or ()), - '--output', tmp, - ] - if pyperf_opts and '--copy-env' in pyperf_opts: - argv, env = _prep_cmd(python, runscript, opts, runid, NOOP) - else: - opts, inherit_envvar = _resolve_restricted_opts(opts) - argv, env = _prep_cmd(python, runscript, opts, runid, inherit_envvar) - _utils.run_command(argv, env=env, hide_stderr=not verbose) - - return pyperf.BenchmarkSuite.load(tmp) - - -def run_other_script(python, script, runid, *, - extra_opts=None, - verbose=False - ): - argv, env = _prep_cmd(python, script, extra_opts, runid) - _utils.run_command(argv, env=env, hide_stderr=not verbose) - - -def _prep_cmd(python, script, opts, runid, on_set_envvar=None): - # Populate the environment variables. 
- env = dict(os.environ) - def set_envvar(name, value): - env[name] = value - if on_set_envvar is not None: - on_set_envvar(name) - # on_set_envvar() may update "opts" so all calls to set_envvar() - # must happen before building argv. - set_envvar('PYPERFORMANCE_RUNID', str(runid)) - - # Build argv. - argv = [ - python, '-u', script, - *(opts or ()), - ] - - return argv, env - - -def _resolve_restricted_opts(opts): - # Deal with --inherit-environ. - FLAG = '--inherit-environ' - resolved = [] - idx = None - for i, opt in enumerate(opts): - if opt.startswith(FLAG + '='): - idx = i + 1 - resolved.append(FLAG) - resolved.append(opt.partition('=')[-2]) - resolved.extend(opts[idx:]) - break - elif opt == FLAG: - idx = i + 1 - resolved.append(FLAG) - resolved.append(opts[idx]) - resolved.extend(opts[idx + 1:]) - break - else: - resolved.append(opt) - else: - resolved.extend(['--inherit-environ', '']) - idx = len(resolved) - 1 - inherited = set(resolved[idx].replace(',', ' ').split()) - def inherit_env_var(name): - inherited.add(name) - resolved[idx] = ','.join(inherited) - - return resolved, inherit_env_var - - -def _insert_on_PYTHONPATH(entry, env): - PYTHONPATH = env.get('PYTHONPATH', '').split(os.pathsep) - PYTHONPATH.insert(0, entry) - env['PYTHONPATH'] = os.pathsep.join(PYTHONPATH) diff --git a/pyperformance/benchmark/_spec.py b/pyperformance/benchmark/_spec.py deleted file mode 100644 index d85de8e1..00000000 --- a/pyperformance/benchmark/_spec.py +++ /dev/null @@ -1,35 +0,0 @@ -from collections import namedtuple - -from .. import _utils - - -def check_name(name): - _utils.check_name('_' + name) - - -def parse_benchmark(entry, *, fail=True): - name = entry - version = None - origin = None - metafile = None - - if not f'_{name}'.isidentifier(): - if not fail: - return None - raise ValueError(f'unsupported benchmark name in {entry!r}') - - bench = BenchmarkSpec(name, version, origin) - return bench, metafile - - -class BenchmarkSpec(namedtuple('BenchmarkSpec', 'name version origin')): - __slots__ = () - - @classmethod - def from_raw(cls, raw): - if isinstance(raw, BenchmarkSpec): - return raw, None - elif isinstance(raw, str): - return parse_benchmark(raw) - else: - raise ValueError(f'unsupported raw spec {raw!r}') From 3479e27ee3ccbdeb293b42fba111baabe1ce4baf Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 14:56:47 -0600 Subject: [PATCH 053/126] Clean up the metadata files. 
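Each benchmark's pyproject.toml now spells out its own [project] table
(name, requires-python, dependencies, urls, dynamic) instead of leaning on
the "metabase" indirection, and [tool.pyperformance] is reduced to the
benchmark name.  The files remain consumable through the metadata loader
from the previous patch; roughly (illustrative sketch, not part of the
diff):

    from pyperformance import _benchmark_metadata

    # Path to one benchmark's metadata file (example path).
    metafile = 'pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml'
    metadata, filename = _benchmark_metadata.load_metadata(metafile)
    print(metadata['spec'].name, metadata.get('tags'))
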
--- pyperformance/data-files/benchmarks/base.toml | 32 +++++++++---------- .../benchmarks/bm_2to3/pyproject.toml | 15 ++++----- .../benchmarks/bm_chameleon/pyproject.toml | 15 ++++----- .../benchmarks/bm_chaos/pyproject.toml | 15 ++++----- .../benchmarks/bm_crypto_pyaes/pyproject.toml | 15 ++++----- .../benchmarks/bm_deltablue/pyproject.toml | 15 ++++----- .../bm_django_template/pyproject.toml | 15 ++++----- .../benchmarks/bm_dulwich_log/pyproject.toml | 18 ++++------- .../benchmarks/bm_fannkuch/pyproject.toml | 15 ++++----- .../benchmarks/bm_float/pyproject.toml | 15 ++++----- .../benchmarks/bm_genshi/pyproject.toml | 15 ++++----- .../benchmarks/bm_go/pyproject.toml | 15 ++++----- .../benchmarks/bm_hexiom/pyproject.toml | 15 ++++----- .../benchmarks/bm_hg_startup/pyproject.toml | 15 ++++----- .../benchmarks/bm_html5lib/pyproject.toml | 15 ++++----- .../benchmarks/bm_json_dumps/pyproject.toml | 15 ++++----- .../benchmarks/bm_json_loads/pyproject.toml | 15 ++++----- .../benchmarks/bm_logging/pyproject.toml | 15 ++++----- .../benchmarks/bm_mako/pyproject.toml | 17 ++++------ .../benchmarks/bm_mdp/pyproject.toml | 15 ++++----- .../bm_meteor_contest/pyproject.toml | 15 ++++----- .../benchmarks/bm_nbody/pyproject.toml | 15 ++++----- .../benchmarks/bm_nqueens/pyproject.toml | 15 ++++----- .../benchmarks/bm_pathlib/pyproject.toml | 15 ++++----- .../benchmarks/bm_pickle/bm_pickle_dict.toml | 15 ++++----- .../benchmarks/bm_pickle/bm_pickle_list.toml | 15 ++++----- .../bm_pickle/bm_pickle_pure_python.toml | 15 ++++----- .../benchmarks/bm_pickle/bm_unpickle.toml | 15 ++++----- .../bm_pickle/bm_unpickle_list.toml | 15 ++++----- .../bm_pickle/bm_unpickle_pure_python.toml | 15 ++++----- .../benchmarks/bm_pickle/pyproject.toml | 15 ++++----- .../benchmarks/bm_pidigits/pyproject.toml | 15 ++++----- .../benchmarks/bm_pyflate/pyproject.toml | 15 ++++----- .../bm_python_startup_no_site.toml | 15 ++++----- .../bm_python_startup/pyproject.toml | 15 ++++----- .../benchmarks/bm_raytrace/pyproject.toml | 15 ++++----- .../bm_regex_compile/pyproject.toml | 15 ++++----- .../benchmarks/bm_regex_dna/pyproject.toml | 15 ++++----- .../benchmarks/bm_regex_effbot/pyproject.toml | 15 ++++----- .../benchmarks/bm_regex_v8/pyproject.toml | 15 ++++----- .../benchmarks/bm_richards/pyproject.toml | 15 ++++----- .../benchmarks/bm_scimark/pyproject.toml | 15 ++++----- .../bm_spectral_norm/pyproject.toml | 15 ++++----- .../bm_sqlalchemy_declarative/pyproject.toml | 15 ++++----- .../bm_sqlalchemy_imperative/pyproject.toml | 15 ++++----- .../benchmarks/bm_sqlite_synth/pyproject.toml | 15 ++++----- .../benchmarks/bm_sympy/pyproject.toml | 15 ++++----- .../benchmarks/bm_telco/pyproject.toml | 15 ++++----- .../benchmarks/bm_tornado_http/pyproject.toml | 15 ++++----- .../bm_unpack_sequence/pyproject.toml | 15 ++++----- .../benchmarks/bm_xml_etree/pyproject.toml | 15 ++++----- 51 files changed, 317 insertions(+), 470 deletions(-) diff --git a/pyperformance/data-files/benchmarks/base.toml b/pyperformance/data-files/benchmarks/base.toml index 37b53c10..1f094cc0 100644 --- a/pyperformance/data-files/benchmarks/base.toml +++ b/pyperformance/data-files/benchmarks/base.toml @@ -1,24 +1,22 @@ [project] -#description = "a pyperformance benchmark" -#readme = "README.rst" #requires-python = ">=3.8" -#license = {file = "LICENSE.txt"} +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["name", "version"] + +#[tool.pyperformance] +#metabase = "" + -dependencies = [ - "pyperf", -] -urls = {repository 
= "https://github.com/python/pyperformance"} -dynamic = [ - "name", - "version", -] + +[project] +name = "pyperformance_bm_" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -metabase = "" -#metafile = "" -#tags = [] -#prescript = "" -#runscript = "" -#extra_opts = "" +name = "" diff --git a/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml b/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml index bf447649..2773d777 100644 --- a/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_2to3" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "2to3" diff --git a/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml b/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml index ba661812..279264d9 100644 --- a/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_chameleon" +name = "pyperformance_bm_chameleon" +requires-python = ">=3.8" dependencies = [ + "pyperf", "Chameleon", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "chameleon" diff --git a/pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml b/pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml index bf447649..7ba09715 100644 --- a/pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_chaos" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "chaos" diff --git a/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml index 516f1bd8..cc97eff4 100644 --- a/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_crypto_pyaes" +name = "pyperformance_bm_crypto_pyaes" +requires-python = ">=3.8" dependencies = [ + "pyperf", "pyaes", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." 
+name = "crypto_pyaes" diff --git a/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml b/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml index bf447649..2345a57e 100644 --- a/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_deltablue" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "deltablue" diff --git a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml index 8035647b..1aece5b6 100644 --- a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_django_template" +name = "pyperformance_bm_django_template" +requires-python = ">=3.8" dependencies = [ + "pyperf", "django", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "django_template" diff --git a/pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml b/pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml index b8524790..ac4df546 100644 --- a/pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml @@ -1,16 +1,12 @@ [project] -name = "bm_dulwich_log" +name = "pyperformance_bm_dulwich_log" +requires-python = ">=3.8" dependencies = [ - # optional? - "dulwich", -] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", + "pyperf", + "dulwich", # optional? ] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "dulwich_log" diff --git a/pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml b/pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml index bf447649..0a13e04c 100644 --- a/pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_fannkuch" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "fannkuch" diff --git a/pyperformance/data-files/benchmarks/bm_float/pyproject.toml b/pyperformance/data-files/benchmarks/bm_float/pyproject.toml index bf447649..2d02da20 100644 --- a/pyperformance/data-files/benchmarks/bm_float/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_float/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. 
-dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_float" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "float" diff --git a/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml b/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml index fd86d6a7..5f9f59df 100644 --- a/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_genshi" +name = "pyperformance_bm_genshi" +requires-python = ">=3.8" dependencies = [ + "pyperf", "Genshi", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "genshi" diff --git a/pyperformance/data-files/benchmarks/bm_go/pyproject.toml b/pyperformance/data-files/benchmarks/bm_go/pyproject.toml index bf447649..a4abcf90 100644 --- a/pyperformance/data-files/benchmarks/bm_go/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_go/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_go" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "go" diff --git a/pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml b/pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml index bf447649..bb66e1f5 100644 --- a/pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_hexiom" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "hexiom" diff --git a/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml index 316f9f67..3c34a6c0 100644 --- a/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_hg_startup" +name = "pyperformance_bm_hg_startup" +requires-python = ">=3.8" dependencies = [ + "pyperf", "mercurial", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." 
+name = "hg_startup" diff --git a/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml b/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml index 0c481f6d..9ba73187 100644 --- a/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_html5lib" +name = "pyperformance_bm_html5lib" +requires-python = ">=3.8" dependencies = [ + "pyperf", "html5lib", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "html5lib" diff --git a/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml b/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml index bf447649..2a7b3bab 100644 --- a/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_json_dumps" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "json_dumps" diff --git a/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml b/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml index bf447649..4da14740 100644 --- a/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_json_loads" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "json_loads" diff --git a/pyperformance/data-files/benchmarks/bm_logging/pyproject.toml b/pyperformance/data-files/benchmarks/bm_logging/pyproject.toml index bf447649..7b2d0878 100644 --- a/pyperformance/data-files/benchmarks/bm_logging/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_logging/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_logging" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "logging" diff --git a/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml b/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml index 1c70c705..dc451fb0 100644 --- a/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_mako" +name = "pyperformance_bm_mako" +requires-python = ">=3.8" dependencies = [ - "Mako", -] - -# XXX This should be inherited from metabase. 
-dynamic = [ - "name", - "version", + "pyperf", + "Mako", ] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "mako" diff --git a/pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml b/pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml index bf447649..176ab715 100644 --- a/pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_mdp" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "mdp" diff --git a/pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml b/pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml index bf447649..3196e0e1 100644 --- a/pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_meteor_contest" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "meteor_contest" diff --git a/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml b/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml index bf447649..3af10dc1 100644 --- a/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_nbody" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "nbody" diff --git a/pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml b/pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml index bf447649..93ad084f 100644 --- a/pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_nqueens" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "nqueens" diff --git a/pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml index bf447649..caa4bb1a 100644 --- a/pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. 
-dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_pathlib" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "pathlib" diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml index dbc4f2dd..8e825eca 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml @@ -1,13 +1,10 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_pickle_dict" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -# "metabase" is set automatically +name = "pickle_dict" extra_opts = ["pickle_dict"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml index 04bc23d5..48599614 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml @@ -1,13 +1,10 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_pickle_list" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -# "metabase" is set automatically +name = "pickle_list" extra_opts = ["pickle_list"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml index a36e4c27..1bfffb28 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml @@ -1,13 +1,10 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_pickle_pure_python" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -# "metabase" is set automatically +name = "pickle_pure_python" extra_opts = ["--pure-python", "pickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml index fde38133..f2a0f226 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml @@ -1,13 +1,10 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_unpickle" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. 
-# "metabase" is set automatically +name = "unpickle" extra_opts = ["unpickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml index 074cd744..d90f5e3e 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml @@ -1,13 +1,10 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_unpickle_list" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -# "metabase" is set automatically +name = "unpickle_list" extra_opts = ["unpickle_list"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml index d364229e..9035e619 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml @@ -1,13 +1,10 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_unpickle_pure_python" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -# "metabase" is set automatically +name = "unpickle_pure_python" extra_opts = ["--pure-python", "unpickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml index 1106ff4f..d41e515a 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml @@ -1,13 +1,10 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_pickle" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "pickle" extra_opts = ["pickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml index bf447649..d67fb916 100644 --- a/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_pidigits" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "pidigits" diff --git a/pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml index bf447649..e1c0a7ff 100644 --- a/pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. 
-dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_pyflate" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "pyflate" diff --git a/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml b/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml index 65049d40..94f11834 100644 --- a/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml +++ b/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml @@ -1,13 +1,10 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_python_startup_no_site" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -# "metabase" is set automatically +name = "python_startup_no_site" extra_opts = ["--no-site"] diff --git a/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml index bf447649..b7a44930 100644 --- a/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_python_startup_" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "_python_startup" diff --git a/pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml b/pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml index bf447649..d9ca5ab5 100644 --- a/pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_raytrace" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "raytrace" diff --git a/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml index bf447649..bace2300 100644 --- a/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_regex_compile" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." 
+name = "regex_compile" diff --git a/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml index bf447649..b672861b 100644 --- a/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_regex_dna" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "regex_dna" diff --git a/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml index bf447649..63992206 100644 --- a/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_regex_effbot" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "regex_effbot" diff --git a/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml index bf447649..56239603 100644 --- a/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_regex_v8" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "regex_v8" diff --git a/pyperformance/data-files/benchmarks/bm_richards/pyproject.toml b/pyperformance/data-files/benchmarks/bm_richards/pyproject.toml index bf447649..aa464ddb 100644 --- a/pyperformance/data-files/benchmarks/bm_richards/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_richards/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_richards" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "richards" diff --git a/pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml b/pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml index bf447649..8b2f2ca1 100644 --- a/pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_scimark" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. 
-metabase = ".." +name = "scimark" diff --git a/pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml b/pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml index bf447649..8ebdddb3 100644 --- a/pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_spectral_norm" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "spectral_norm" diff --git a/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml index 9597ae5a..3c1cc775 100644 --- a/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_sqlalchemy_declarative" +name = "pyperformance_bm_sqlalchemy_declarative" +requires-python = ">=3.8" dependencies = [ + "pyperf", "SQLAlchemy", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "sqlalchemy_declarative" diff --git a/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml index eb5b89ef..1b9f75db 100644 --- a/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_sqlalchemy_imperative" +name = "pyperformance_bm_sqlalchemy_imperative" +requires-python = ">=3.8" dependencies = [ + "pyperf", "SQLAlchemy", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "sqlalchemy_imperative" diff --git a/pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml index bf447649..f406d737 100644 --- a/pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_sqlite_synth" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." 
+name = "sqlite_synth" diff --git a/pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml index 64683b2b..bcd03442 100644 --- a/pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_sympy" +name = "pyperformance_bm_sympy" +requires-python = ">=3.8" dependencies = [ + "pyperf", "sympy", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "sympy" diff --git a/pyperformance/data-files/benchmarks/bm_telco/pyproject.toml b/pyperformance/data-files/benchmarks/bm_telco/pyproject.toml index bf447649..194aa095 100644 --- a/pyperformance/data-files/benchmarks/bm_telco/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_telco/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_telco" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "telco" diff --git a/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml b/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml index 1bb2b709..76833b86 100644 --- a/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml @@ -1,15 +1,12 @@ [project] -name = "bm_tornado_http" +name = "pyperformance_bm_tornado_http" +requires-python = ">=3.8" dependencies = [ + "pyperf", "tornado", ] - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "tornado_http" diff --git a/pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml b/pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml index bf447649..ddbd2559 100644 --- a/pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. -dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_unpack_sequence" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "unpack_sequence" diff --git a/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml b/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml index bf447649..95076479 100644 --- a/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml @@ -1,12 +1,9 @@ [project] -#name = "bm_" - -# XXX This should be inherited from metabase. 
-dynamic = [ - "name", - "version", -] +name = "pyperformance_bm_xml_etree" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] [tool.pyperformance] -# "name" is set automatically. -metabase = ".." +name = "xml_etree" From 35e0c7afea4437aea2a52788ea8fcc6486aeeb9d Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 15:41:36 -0600 Subject: [PATCH 054/126] Do not import _manifest in itself. --- pyperformance/_manifest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 5883618a..6096ef0f 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -12,7 +12,7 @@ from . import __version__, DATA_DIR -from . import _manifest, _benchmark +from . import _benchmark DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') @@ -53,7 +53,7 @@ def resolve(bench): bench.metafile = metafile return bench with open(filename) as infile: - return _manifest.parse_manifest(infile, resolve=resolve, filename=filename) + return parse_manifest(infile, resolve=resolve, filename=filename) def parse_manifest(text, *, resolve=None, filename=None): From 0fe268a584bf37f7ebc6373794c77bc015ef1033 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 15:48:08 -0600 Subject: [PATCH 055/126] Drop version and origin from the manifest. --- pyperformance/_benchmark.py | 4 + pyperformance/_manifest.py | 10 +- pyperformance/data-files/benchmarks/MANIFEST | 102 +++++++++---------- 3 files changed, 59 insertions(+), 57 deletions(-) diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py index e2b05c14..a472323c 100644 --- a/pyperformance/_benchmark.py +++ b/pyperformance/_benchmark.py @@ -48,6 +48,10 @@ def from_raw(cls, raw): else: raise ValueError(f'unsupported raw spec {raw!r}') + def __new__(cls, name, version=None, origin=None): + self = super().__new__(cls, name, version or None, origin or None) + return self + class Benchmark: diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 6096ef0f..3706df94 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -18,7 +18,7 @@ DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST') -BENCH_COLUMNS = ('name', 'version', 'origin', 'metafile') +BENCH_COLUMNS = ('name', 'metafile') BENCH_HEADER = '\t'.join(BENCH_COLUMNS) @@ -139,16 +139,14 @@ def _parse_benchmarks(lines, resolve, filename): localdir = os.path.dirname(filename) benchmarks = [] + version = origin = None for line in lines: try: - name, version, origin, metafile = (None if l == '-' else l + name, metafile = (None if l == '-' else l for l in line.split('\t')) except ValueError: raise ValueError(f'bad benchmark line {line!r}') - spec = _benchmark.BenchmarkSpec(name or None, - version or None, - origin or None, - ) + spec = _benchmark.BenchmarkSpec(name or None, version, origin) if metafile: metafile = _resolve_metafile(metafile, name, localdir) bench = _benchmark.Benchmark(spec, metafile) diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST index afcdd121..a4684135 100644 --- a/pyperformance/data-files/benchmarks/MANIFEST +++ b/pyperformance/data-files/benchmarks/MANIFEST @@ -1,58 +1,58 @@ [benchmarks] -name version origin metafile -2to3 - - -chameleon - - -chaos - - -crypto_pyaes - - -deltablue - - -django_template - - -dulwich_log - - -fannkuch - - -float - - -genshi - 
- -go - - -hexiom - - +name metafile +2to3 +chameleon +chaos +crypto_pyaes +deltablue +django_template +dulwich_log +fannkuch +float +genshi +go +hexiom # FIXME: this benchmark fails with: # Unable to get the program 'hg' from the virtual environment -#hg_startup - - -#html5lib - - -json_dumps - - -json_loads - - -logging - - -mako - - -mdp - - -meteor_contest - - -nbody - - -nqueens - - -pathlib - - -pickle - - -pickle_dict - - -pickle_list - - -pickle_pure_python - - -pidigits - - -pyflate - - -python_startup - - -python_startup_no_site - - -raytrace - - -regex_compile - - -regex_dna - - -regex_effbot - - -regex_v8 - - -richards - - -scimark - - -spectral_norm - - -sqlalchemy_declarative - - -sqlalchemy_imperative - - -sqlite_synth - - -sympy - - -telco - - -tornado_http - - -unpack_sequence - - -unpickle - - -unpickle_list - - -unpickle_pure_python - - -xml_etree - - +#hg_startup +#html5lib +json_dumps +json_loads +logging +mako +mdp +meteor_contest +nbody +nqueens +pathlib +pickle +pickle_dict +pickle_list +pickle_pure_python +pidigits +pyflate +python_startup +python_startup_no_site +raytrace +regex_compile +regex_dna +regex_effbot +regex_v8 +richards +scimark +spectral_norm +sqlalchemy_declarative +sqlalchemy_imperative +sqlite_synth +sympy +telco +tornado_http +unpack_sequence +unpickle +unpickle_list +unpickle_pure_python +xml_etree [group default] From 698dec9cce4a6616c95c435f4267c4d4d0bebdf6 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 15:52:08 -0600 Subject: [PATCH 056/126] Add a project-level symlink to the default benchmarks. --- benchmarks | 1 + 1 file changed, 1 insertion(+) create mode 120000 benchmarks diff --git a/benchmarks b/benchmarks new file mode 120000 index 00000000..5a5d7851 --- /dev/null +++ b/benchmarks @@ -0,0 +1 @@ +pyperformance/data-files/benchmarks \ No newline at end of file From f3c6c4b2ce589c7eda8640fe6d7f25a1a9bc2576 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 16:13:13 -0600 Subject: [PATCH 057/126] Names starting with a digit. --- pyperformance/_benchmark_metadata.py | 2 +- pyperformance/_utils.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pyperformance/_benchmark_metadata.py b/pyperformance/_benchmark_metadata.py index e39b867e..9c9eff05 100644 --- a/pyperformance/_benchmark_metadata.py +++ b/pyperformance/_benchmark_metadata.py @@ -194,7 +194,7 @@ def _resolve(project, tool, filename): def _resolve_value(field, value, rootdir): if field == 'name': - _utils.check_name(value) + _utils.check_name(value, allownumeric=True) elif field == 'metafile': assert False, 'unreachable' elif field == 'tags': diff --git a/pyperformance/_utils.py b/pyperformance/_utils.py index e71639dc..3518d34b 100644 --- a/pyperformance/_utils.py +++ b/pyperformance/_utils.py @@ -106,9 +106,11 @@ def run_command(command, env=None, *, hide_stderr=True): ####################################### # misc utils -def check_name(name, *, loose=False): +def check_name(name, *, loose=False, allownumeric=False): if not name or not isinstance(name, str): raise ValueError(f'bad name {name!r}') + if allownumeric: + name = f'_{name}' if not loose: if name.startswith('-'): raise ValueError(name) From 09878422d6aaf093331f55c6df70aac887c00905 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 16:19:27 -0600 Subject: [PATCH 058/126] Drop the base metadata file. 
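The per-benchmark pyproject.toml files now spell out their full [project] metadata themselves rather than inheriting it from this shared base file, so base.toml is no longer needed.  For reference, the per-benchmark files all follow roughly this shape (the NAME placeholder here is illustrative, not taken from any one file):

    [project]
    name = "pyperformance_bm_NAME"
    requires-python = ">=3.8"
    dependencies = ["pyperf"]
    urls = {repository = "https://github.com/python/pyperformance"}
    dynamic = ["version"]

    [tool.pyperformance]
    name = "NAME"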
--- pyperformance/data-files/benchmarks/base.toml | 22 ------------------- 1 file changed, 22 deletions(-) delete mode 100644 pyperformance/data-files/benchmarks/base.toml diff --git a/pyperformance/data-files/benchmarks/base.toml b/pyperformance/data-files/benchmarks/base.toml deleted file mode 100644 index 1f094cc0..00000000 --- a/pyperformance/data-files/benchmarks/base.toml +++ /dev/null @@ -1,22 +0,0 @@ -[project] -#requires-python = ">=3.8" -dependencies = ["pyperf"] -urls = {repository = "https://github.com/python/pyperformance"} -dynamic = ["name", "version"] - -#[tool.pyperformance] -#metabase = "" - - - - - -[project] -name = "pyperformance_bm_" -requires-python = ">=3.8" -dependencies = ["pyperf"] -urls = {repository = "https://github.com/python/pyperformance"} -dynamic = ["version"] - -[tool.pyperformance] -name = "" From 2c99d0f4ec2136a2e14121260d14572a8a83e174 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 16:27:17 -0600 Subject: [PATCH 059/126] Allow extra project fields even if there is a metabase. --- pyperformance/_benchmark_metadata.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyperformance/_benchmark_metadata.py b/pyperformance/_benchmark_metadata.py index 9c9eff05..954cb52a 100644 --- a/pyperformance/_benchmark_metadata.py +++ b/pyperformance/_benchmark_metadata.py @@ -127,7 +127,8 @@ def _ensure_defaults(defaults, rootdir): return defaults -def _resolve_base(metabase, project, filename, defaults): +def _resolve_base(metabase, project, filename, defaults, *, + minimalwithbase=False): rootdir, basename = os.path.split(filename) if not metabase: @@ -142,7 +143,7 @@ def _resolve_base(metabase, project, filename, defaults): if not os.path.isfile(metabase): return None, None - if project is not None: + if project is not None and minimalwithbase: unexpected = set(project) - {'name', 'dynamic', 'dependencies'} if unexpected: raise ValueError(f'[project] should be minimal if "metabase" is provided, got extra {sorted(unexpected)}') From 121a281465a76ee3cffe9fb2eba405b1d1a91163 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 16:29:23 -0600 Subject: [PATCH 060/126] Fix a typo. --- .../data-files/benchmarks/bm_python_startup/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml index b7a44930..a30d0964 100644 --- a/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml @@ -1,9 +1,9 @@ [project] -name = "pyperformance_bm_python_startup_" +name = "pyperformance_bm_python_startup" requires-python = ">=3.8" dependencies = ["pyperf"] urls = {repository = "https://github.com/python/pyperformance"} dynamic = ["version"] [tool.pyperformance] -name = "_python_startup" +name = "python_startup" From 0f9d202a87a6eefa7455645bb89751fafb667f95 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 17:13:49 -0600 Subject: [PATCH 061/126] Allow specifying the supported groups in the manifest. --- pyperformance/_manifest.py | 8 ++++++-- pyperformance/cli_run.py | 2 +- pyperformance/data-files/benchmarks/MANIFEST | 10 ++++++++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 3706df94..56e0318a 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -12,7 +12,7 @@ from .
import __version__, DATA_DIR -from . import _benchmark +from . import _benchmark, _utils DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks') @@ -72,6 +72,10 @@ def parse_manifest(text, *, resolve=None, filename=None): benchmarks = _parse_benchmarks(seclines, resolve, filename) elif benchmarks is None: raise ValueError('invalid manifest file, expected "benchmarks" section') + elif section == 'groups': + for group in seclines: + _utils.check_name(group) + groups.setdefault(group, None) elif section.startswith('group '): _, _, group = section.partition(' ') groups[group] = _parse_group(group, seclines, benchmarks) @@ -197,7 +201,7 @@ def _parse_group(name, lines, benchmarks): def _check_groups(groups): for group, benchmarks in groups.items(): - for bench in benchmarks: + for bench in benchmarks or (): if not isinstance(bench, str): continue elif bench not in groups: diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index 0b4eda11..f3191cb4 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -59,7 +59,7 @@ def cmd_list_groups(manifest): all_benchmarks = set(manifest.benchmarks) for group, specs in sorted(manifest.groups.items()): - known = set(specs) & all_benchmarks + known = set(specs or ()) & all_benchmarks if not known: # skip empty groups continue diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST index a4684135..a7b4da4f 100644 --- a/pyperformance/data-files/benchmarks/MANIFEST +++ b/pyperformance/data-files/benchmarks/MANIFEST @@ -55,6 +55,15 @@ unpickle_pure_python xml_etree +[groups] +startup +regex +serialize +apps +math +template + + [group default] 2to3 chameleon @@ -74,6 +83,7 @@ json_dumps json_loads logging mako +#mdp meteor_contest nbody nqueens From 3d19edf03176c14bd69439f202d2b773cd7faded Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 17:14:55 -0600 Subject: [PATCH 062/126] Add tags to all the benchmarks. 
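Each benchmark's [tool.pyperformance] table gets a "tags" entry naming the category it belongs to (apps, math, regex, serialize, startup, or template), so that groups can later be derived from the metadata instead of being maintained by hand.  For example, bm_2to3's metadata now ends with:

    [tool.pyperformance]
    name = "2to3"
    tags = "apps"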
--- pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml | 1 + .../data-files/benchmarks/bm_django_template/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_float/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_mako/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml | 1 + .../data-files/benchmarks/bm_pickle/bm_pickle_dict.toml | 1 + .../data-files/benchmarks/bm_pickle/bm_pickle_list.toml | 1 + .../data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml | 1 + pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml | 1 + .../data-files/benchmarks/bm_pickle/bm_unpickle_list.toml | 1 + .../data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml | 1 + pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml | 1 + .../benchmarks/bm_python_startup/bm_python_startup_no_site.toml | 1 + .../data-files/benchmarks/bm_python_startup/pyproject.toml | 1 + .../data-files/benchmarks/bm_regex_compile/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml | 1 + .../data-files/benchmarks/bm_regex_effbot/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml | 1 + .../data-files/benchmarks/bm_tornado_http/pyproject.toml | 1 + pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml | 1 + 27 files changed, 27 insertions(+) diff --git a/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml b/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml index 2773d777..f35eb568 100644 --- a/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "2to3" +tags = "apps" diff --git a/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml b/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml index 279264d9..b9dbd16d 100644 --- a/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml @@ -10,3 +10,4 @@ dynamic = ["version"] [tool.pyperformance] name = "chameleon" +tags = "apps" diff --git a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml index 1aece5b6..0b66d9d0 100644 --- a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml @@ -10,3 +10,4 @@ dynamic = ["version"] [tool.pyperformance] name = "django_template" +tags = "template" diff --git a/pyperformance/data-files/benchmarks/bm_float/pyproject.toml b/pyperformance/data-files/benchmarks/bm_float/pyproject.toml index 2d02da20..fd0133a7 100644 --- a/pyperformance/data-files/benchmarks/bm_float/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_float/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "float" +tags = "math" diff --git a/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml 
b/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml index 5f9f59df..fbba40df 100644 --- a/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml @@ -10,3 +10,4 @@ dynamic = ["version"] [tool.pyperformance] name = "genshi" +tags = "template" diff --git a/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml index 3c34a6c0..74d67f3e 100644 --- a/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml @@ -10,3 +10,4 @@ dynamic = ["version"] [tool.pyperformance] name = "hg_startup" +tags = "startup" diff --git a/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml b/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml index 9ba73187..3bd96abd 100644 --- a/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml @@ -10,3 +10,4 @@ dynamic = ["version"] [tool.pyperformance] name = "html5lib" +tags = "apps" diff --git a/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml b/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml index 2a7b3bab..b292fcd7 100644 --- a/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "json_dumps" +tags = "serialize" diff --git a/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml b/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml index 4da14740..18c73fda 100644 --- a/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "json_loads" +tags = "serialize" diff --git a/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml b/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml index dc451fb0..80e1abce 100644 --- a/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_mako/pyproject.toml @@ -10,3 +10,4 @@ dynamic = ["version"] [tool.pyperformance] name = "mako" +tags = "template" diff --git a/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml b/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml index 3af10dc1..546c300a 100644 --- a/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "nbody" +tags = "math" diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml index 8e825eca..2a87c920 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml @@ -7,4 +7,5 @@ dynamic = ["version"] [tool.pyperformance] name = "pickle_dict" +tags = "serialize" extra_opts = ["pickle_dict"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml index 48599614..ab8a3618 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml @@ -7,4 +7,5 @@ 
dynamic = ["version"] [tool.pyperformance] name = "pickle_list" +tags = "serialize" extra_opts = ["pickle_list"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml index 1bfffb28..94288918 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml @@ -7,4 +7,5 @@ dynamic = ["version"] [tool.pyperformance] name = "pickle_pure_python" +tags = "serialize" extra_opts = ["--pure-python", "pickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml index f2a0f226..959609d0 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml @@ -7,4 +7,5 @@ dynamic = ["version"] [tool.pyperformance] name = "unpickle" +tags = "serialize" extra_opts = ["unpickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml index d90f5e3e..b5eb4da3 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml @@ -7,4 +7,5 @@ dynamic = ["version"] [tool.pyperformance] name = "unpickle_list" +tags = "serialize" extra_opts = ["unpickle_list"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml index 9035e619..af1a2e7c 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml @@ -7,4 +7,5 @@ dynamic = ["version"] [tool.pyperformance] name = "unpickle_pure_python" +tags = "serialize" extra_opts = ["--pure-python", "unpickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml index d41e515a..87bc6ab6 100644 --- a/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml @@ -7,4 +7,5 @@ dynamic = ["version"] [tool.pyperformance] name = "pickle" +tags = "serialize" extra_opts = ["pickle"] diff --git a/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml index d67fb916..8fef04a5 100644 --- a/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "pidigits" +tags = "math" diff --git a/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml b/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml index 94f11834..13dd29f0 100644 --- a/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml +++ b/pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml @@ -8,3 +8,4 @@ dynamic = ["version"] [tool.pyperformance] name = "python_startup_no_site" extra_opts = ["--no-site"] +tags = "startup" diff --git a/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml index 
a30d0964..1e55ace1 100644 --- a/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "python_startup" +tags = "startup" diff --git a/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml index bace2300..386df534 100644 --- a/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "regex_compile" +tags = "regex" diff --git a/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml index b672861b..33a40ca2 100644 --- a/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "regex_dna" +tags = "regex" diff --git a/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml index 63992206..3d64e118 100644 --- a/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "regex_effbot" +tags = "regex" diff --git a/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml b/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml index 56239603..86f70bb9 100644 --- a/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "regex_v8" +tags = "regex" diff --git a/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml b/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml index 76833b86..c165b4fb 100644 --- a/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml @@ -10,3 +10,4 @@ dynamic = ["version"] [tool.pyperformance] name = "tornado_http" +tags = "apps" diff --git a/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml b/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml index 95076479..21feb611 100644 --- a/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml @@ -7,3 +7,4 @@ dynamic = ["version"] [tool.pyperformance] name = "xml_etree" +tags = "serialize" From 7270c760be3eddf10264885e15c16a7c9dba6695 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 18:10:28 -0600 Subject: [PATCH 063/126] Use the tags to get the groups in the default manifest. 
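Groups that a manifest declares without listing their members are now filled in from the benchmarks' tags, and when no groups other than "default" are declared at all, every tag automatically becomes a group.  That is what lets the bundled MANIFEST below drop its hand-written [group ...] listings.  A rough, self-contained sketch of the fill rule (toy data and names, not the real Benchmark objects or the patch's exact code):

    # Toy stand-ins for (benchmark name, tags) pairs.
    benchmarks = [
        ("regex_compile", ("regex",)),
        ("regex_dna", ("regex",)),
        ("nbody", ("math",)),
    ]
    groups = {"default": [name for name, _ in benchmarks]}
    tags = {}
    for name, bench_tags in benchmarks:
        for tag in bench_tags:
            tags.setdefault(tag, []).append(name)
    if list(groups) == ["default"]:
        # No other groups were declared, so every tag becomes a group.
        groups.update(tags)
    # groups -> {"default": [...], "regex": ["regex_compile", "regex_dna"],
    #            "math": ["nbody"]}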
--- pyperformance/_manifest.py | 22 +++++++- pyperformance/data-files/benchmarks/MANIFEST | 55 +++----------------- 2 files changed, 28 insertions(+), 49 deletions(-) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 56e0318a..26e6696c 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -66,7 +66,7 @@ def parse_manifest(text, *, resolve=None, filename=None): filename = getattr(text, 'name', None) benchmarks = None - groups = {} + groups = {'default': None} for section, seclines in _iter_sections(lines): if section == 'benchmarks': benchmarks = _parse_benchmarks(seclines, resolve, filename) @@ -80,6 +80,26 @@ def parse_manifest(text, *, resolve=None, filename=None): _, _, group = section.partition(' ') groups[group] = _parse_group(group, seclines, benchmarks) _check_groups(groups) + + if groups['default'] is None: + groups['default'] = [b.name for name in benchmarks or ()] + + # Fill in groups from benchmark tags. + tags = {} + for bench in benchmarks or (): + for tag in getattr(bench, 'tags', ()): + if tag in tags: + tags[tag].append(bench) + else: + tags[tag] = [bench] + tags.pop('default', None) # "default" is manifest-specific. + if list(groups) == ['default']: + groups.update(tags) + else: + for group in groups: + if groups[group] is None: + groups[group] = tags.get(group) + # XXX Update tags for each benchmark with member groups. return BenchmarksManifest(benchmarks, groups) diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST index a7b4da4f..0c6c102a 100644 --- a/pyperformance/data-files/benchmarks/MANIFEST +++ b/pyperformance/data-files/benchmarks/MANIFEST @@ -55,13 +55,13 @@ unpickle_pure_python xml_etree -[groups] -startup -regex -serialize -apps -math -template +#[groups] +#startup +#regex +#serialize +#apps +#math +#template [group default] @@ -115,44 +115,3 @@ unpickle unpickle_list unpickle_pure_python xml_etree - - -[group startup] -python_startup -python_startup_no_site -#hg_startup - - -[group regex] -regex_v8 -regex_effbot -regex_compile -regex_dna - - -[group serialize] -pickle_pure_python -unpickle_pure_python # Not for Python 3 -pickle -unpickle -xml_etree -json_dumps -json_loads - - -[group apps] -2to3 -chameleon -#html5lib -tornado_http - - -[group math] -float -nbody -pidigits - - -[group template] -django_template -mako From efeb77ee3bf4400c0ff5b0594cce6fd79184febb Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 18:17:19 -0600 Subject: [PATCH 064/126] Show the default group before the others. --- pyperformance/cli_run.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index f3191cb4..6c53f4cd 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -58,7 +58,12 @@ def cmd_list(options, benchmarks): def cmd_list_groups(manifest): all_benchmarks = set(manifest.benchmarks) - for group, specs in sorted(manifest.groups.items()): + groups = sorted(manifest.groups) + groups.remove('all') + groups.remove('default') + groups[0:0] = ['all', 'default'] + for group in groups: + specs = manifest.groups[group] known = set(specs or ()) & all_benchmarks if not known: # skip empty groups From 3b3de1d2739341dd59c8bf349551f4ef6ce9ba5a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 18:23:56 -0600 Subject: [PATCH 065/126] Drop an outdated comment. 
--- pyperformance/_benchmark_selections.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pyperformance/_benchmark_selections.py b/pyperformance/_benchmark_selections.py index c788641e..ec24a788 100644 --- a/pyperformance/_benchmark_selections.py +++ b/pyperformance/_benchmark_selections.py @@ -85,7 +85,6 @@ def _match_selection(manifest, kind, parsed, byname): # No match! The caller can handle this as they like. yield str(bench) elif kind == 'tag': - # XXX Instad, walk all benchmarks to check the "tags" field? groups = [] if callable(parsed): match_tag = parsed From 8ceb990bb22fbe8cc49359ff6bc2904a55b7c9e6 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 2 Nov 2021 18:36:37 -0600 Subject: [PATCH 066/126] Allow excluding benchmarks in a group. --- pyperformance/_manifest.py | 17 +++++++ pyperformance/data-files/benchmarks/MANIFEST | 51 +------------------- 2 files changed, 18 insertions(+), 50 deletions(-) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 26e6696c..e0a0b7d6 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -206,7 +206,23 @@ def _parse_group(name, lines, benchmarks): group = [] seen = set() + unresolved = 0 for line in lines: + if line.startswith('-'): + # Exclude a benchmark. + if unresolved: + raise NotImplementedError(line) + if not group: + group.extend(benchmarks) + excluded = line[1:] + _benchmark.check_name(excluded) + try: + bench = byname[excluded] + except KeyError: + raise NotImplementedError(line) + if bench in group: + group.remove(bench) + continue benchname = line _benchmark.check_name(benchname) if benchname in seen: @@ -216,6 +232,7 @@ def _parse_group(name, lines, benchmarks): else: # It may be a group. We check later. group.append(benchname) + unresolved += 1 return group diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST index 0c6c102a..b791b173 100644 --- a/pyperformance/data-files/benchmarks/MANIFEST +++ b/pyperformance/data-files/benchmarks/MANIFEST @@ -65,53 +65,4 @@ xml_etree [group default] -2to3 -chameleon -chaos -crypto_pyaes -deltablue -django_template -dulwich_log -fannkuch -float -genshi -go -hexiom -#hg_startup -#html5lib -json_dumps -json_loads -logging -mako -#mdp -meteor_contest -nbody -nqueens -pathlib -pickle -pickle_dict -pickle_list -pickle_pure_python -pidigits -pyflate -python_startup -python_startup_no_site -raytrace -regex_compile -regex_dna -regex_effbot -regex_v8 -richards -scimark -spectral_norm -sqlalchemy_declarative -sqlalchemy_imperative -sqlite_synth -sympy -telco -tornado_http -unpack_sequence -unpickle -unpickle_list -unpickle_pure_python -xml_etree +-mdp From 0f108591e33f46547b8c1bdf258082f9ab79417a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 3 Nov 2021 16:43:38 -0600 Subject: [PATCH 067/126] Finish _init_metadata(). 
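The metadata is now loaded lazily: nothing is read until a value is first requested, and the spec's name and version are passed in as defaults. A rough, self-contained sketch of that pattern (the loader below is a stub standing in for `_benchmark_metadata.load_metadata()`, and the class is invented for the example):

```
def load_metadata_stub(metafile, defaults):
    # Stand-in for the real loader; it would parse the TOML metafile here.
    return dict(defaults)

class LazyBenchmark:
    def __init__(self, metafile, name, version):
        self.metafile = metafile
        self._defaults = {'name': name, 'version': version}
        self._metadata = None

    def get_metadata_value(self, key, default=None):
        # Load on first access only; later lookups reuse the cached dict.
        if self._metadata is None:
            self._metadata = load_metadata_stub(self.metafile, self._defaults)
        return self._metadata.setdefault(key, default)

bench = LazyBenchmark('pyproject.toml', 'json_dumps', '1.0')
print(bench.get_metadata_value('version'))  # '1.0'
```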
--- pyperformance/_benchmark.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py index a472323c..21789092 100644 --- a/pyperformance/_benchmark.py +++ b/pyperformance/_benchmark.py @@ -111,22 +111,23 @@ def _get_rootdir(self): return self._rootdir def _init_metadata(self): - if self._metadata is not None: - raise NotImplementedError + #assert self._metadata is None + defaults = { + 'name': self.name, + 'version': self.version, + } + self._metadata, _ = _benchmark_metadata.load_metadata( + self.metafile, + defaults, + ) def _get_metadata_value(self, key, default): try: return self._metadata[key] except TypeError: - if self._metadata is None: - defaults = { - 'name': self.name, - 'version': self.version, - } - self._metadata, _ = _benchmark_metadata.load_metadata( - self.metafile, - defaults, - ) + if self._metadata is not None: + raise # re-raise + self._init_metadata() except KeyError: pass return self._metadata.setdefault(key, default) From 9e3124d7e5ba170d6f6e3e4477babaedc750275a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 3 Nov 2021 16:46:46 -0600 Subject: [PATCH 068/126] metabase -> inherits --- pyperformance/_benchmark_metadata.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyperformance/_benchmark_metadata.py b/pyperformance/_benchmark_metadata.py index 954cb52a..43a7f1a2 100644 --- a/pyperformance/_benchmark_metadata.py +++ b/pyperformance/_benchmark_metadata.py @@ -25,7 +25,7 @@ #'urls': '', } TOOL_FIELDS = { - #'metabase': None, + #'inherits': None, 'metafile': None, 'name': None, 'tags': None, @@ -69,7 +69,7 @@ def load_metadata(metafile, defaults=None): defaults = _ensure_defaults(defaults, rootdir) base, basefile = _resolve_base( - tool.get('metabase'), # XXX Pop it? + tool.get('inherits'), # XXX Pop it? project, filename, defaults, @@ -146,7 +146,7 @@ def _resolve_base(metabase, project, filename, defaults, *, if project is not None and minimalwithbase: unexpected = set(project) - {'name', 'dynamic', 'dependencies'} if unexpected: - raise ValueError(f'[project] should be minimal if "metabase" is provided, got extra {sorted(unexpected)}') + raise ValueError(f'[project] should be minimal if "inherits" is provided, got extra {sorted(unexpected)}') if metabase == '..': metabase = os.path.join( From 62f7c237e31609f2b40a1507827252f6b44da481 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 3 Nov 2021 16:48:02 -0600 Subject: [PATCH 069/126] Document the manifest and benchmark formats. --- BENCHMARKS_FORMAT.md | 323 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 323 insertions(+) create mode 100644 BENCHMARKS_FORMAT.md diff --git a/BENCHMARKS_FORMAT.md b/BENCHMARKS_FORMAT.md new file mode 100644 index 00000000..292d15ed --- /dev/null +++ b/BENCHMARKS_FORMAT.md @@ -0,0 +1,323 @@ +# The pyperformance File Formats + +`pyperformance` uses two file formats to identify benchmarks: + +* manifest - a set of benchmarks +- metadata - a single benchmark + +For each benchmark, there are two required files and several optional +ones. Those files are expected to be in a specific directory structure +(unless customized in the metadata). + +The structure (see below) is such that it's much easier to maintain +a benchmark (or set of benchmarks) on GitHub and distribute it on PyPI. +It also simplifies publishing a Python project's benchmarks. + +Benchmarks can inherit metadata from other metadata files. 
+This is useful for keeping common metadata for a set of benchmarks +(e.g. "version") in one file. Likewise, benchmarks for a Python +project can inherit metadata from the project's pyproject.toml. + +Sometimes a benchmark will have one or more variants that run using +the same script. Variants like this are supported by `pyperformance` +without requiring much extra effort. + + +## Benchmark Directory Structure + +Normally a benchmark is structured like this: + +``` +bm_NAME/ + data/ # if needed + requirements.txt # lock file, if any + pyproject.toml + run_benchmark.py +``` + +(Note the "bm\_" prefix on the directory name.) + +"pyproject.toml" holds the metadata. "run_benchmark.py" holds +the actual benchmark code. Both are necessary. + +`pyperformance` treats the metadata file as the fundamental source of +information about a benchmark. A manifest for a set of benchmarks is +effectively a mapping of names to metadata files. So a metadata file +is essential. It can be located anywhere on disk. However, if it +isn't located in the structure described above then the metadata must +identify where to find the other files. + +Other than that, only a benchmark script (e.g. "run_benchmark.py" above) +is required. All other files are optional. + +When a benchmark has variants, each has its own metadata file next to +the normal "pyproject.toml", named "bm_NAME.toml". (Note the "bm\_" prefix.) +The format of variant metadata files is exactly the same. `pyperformance` +treats them the same, except that the sibling "pyproject.toml" is +inherited by default). + + +## Manifest Files + +A manifest file identifies a set of benchmarks, as well as (optionally) +how they should be grouped. `pyperformance` uses the manifest to +determine which benchmarks are available to run (and thus which to run). + +A manifest normally looks like this: + +``` +[benchmarks] + +name metafile +bench1 somedir/bm_bench1/pyproject.toml +bench2 somedir/pyproject.toml +bench3 ../anotherdir +``` + +The "benchmarks" section is a table with rows of tab-separated values. +The "name" value is how `pyperformance` will identify the benchmark. +The "metafile" value is where `pyperformance` will look for the +benchmark's metadata. If a metafile is a directory then it looks +for "pyproject.toml" in that directory. + + +### Benchmark Groups + +The other sections in the manifest file relate to grouping: + +``` +[benchmarks] + +name metafile +bench1 somedir/bm_bench1 +bench2 somedir/bm_bench2 +bench3 anotherdir/mybench.toml + +[groups] +tag1 +tag2 + +[group default] +bench2 +bench3 + +[group tricky] +bench2 +``` + +The "groups" section specifies available groups that may be identified +by benchmark tags (see about tags in the metadata section below). Any +other group sections in the manifest are automatically added to the list +of available groups. + +If no "default" group is specified then one is automatically added with +all benchmarks from the "benchmarks" section in it. If there is no +"groups" section and no individual group sections (other than "default") +then the set of all tags of the known benchmarks is treated as "groups". +A group named "all" is also automatically added which has all known +benchmarks in it. + +Benchmarks can be excluded from a group by using a `-` (minus) prefix. +Any benchmark already in the list (at that point) that matches will be +dropped from the list. If the first entry in the section is an +exclusion then all known benchmarks are first added to the list +before the exclusion is applied.
+ +For example: + +``` +[benchmarks] + +name metafile +bench1 somedir/bm_bench1 +bench2 somedir/bm_bench2 +bench3 anotherdir/mybench.toml + +[group default] +-bench1 +``` + +This means by default only "bench2" and "bench3" are run. + + +### A Local Benchmark Suite + +Often a project will have more than one benchmark that it will treat +as a suite. `pyperformance` handles this without any extra work. + +In the dirctory holding the manifest file put all the benchmarks. Then +you will `` in the "metafile" column, like this: + +``` +[benchmarks] + +name metafile +bench1 +bench2 +bench3 +bench4 +bench5 +``` + +It will look for `DIR/bm_NAME/pyproject.toml`. + +If there are also variants, identify the main benchmark +in the "metafile" value, like this: + +``` +[benchmarks] + +name metafile +bench1 +bench2 +bench3 +variant1 +variant2 +``` + +`pyperformance` will look for `DIR/bm_BASE/bm_NAME.toml`, where "BASE" +is the part after "local:". + + +### A Project's Benchmark Suite + +A Python project can identify its benchmark suite by putting the path +to the manifest file in the project's top-level pyproject.toml. +Additional manifests can be identified as well. + +``` +[tool.pyperformance] +manifest = "..." +manifests = ["...", "..."] +``` + + +### Merging Manifests + +Note that the `pyperformance` CLI does not support passing multiple +manifests. Instead you must merge the files manually into another file +and pass that. + + +## Benchmark Metadata Files + +A benchmark's metadata file (usually pyproject.toml) follows the format +specified in [PEP 621](https://www.python.org/dev/peps/pep-0621) and +[PEP 518](https://www.python.org/dev/peps/pep-0518). So there are two +supported sections in the file: "project" and "tool.pyperformance". + +A typical metadata file will look something like this: + +``` +[project] +version = "0.9.1" +dependencies = ["pyperf"] +dynamic = ["name"] + +[tool.pyperformance] +name = "my_benchmark" +``` + +A highly detailed one might look like this: + +``` +[project] +name = "pyperformance_bm_json_dumps" +version = "0.9.1" +description = "A benchmark for json.dumps()" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "json_dumps" +tags = "serialize" +runscript = "bench.py" +datadir = ".data-files/extras" +extra_opts = ["--special"] +``` + + +### Inheritance + +For one benchmark to inherit from another (or from common metadata), +the "inherits" field is available: + +``` +[project] +dependencies = ["pyperf"] +dynamic = ["name", "version"] + +[tool.pyperformance] +name = "my_benchmark" +inherits = "../common.toml" +``` + +All values in either section of the inherited metadata are treated +as defaults, on top of which the current metadata is applied. In the +above example, for instance, a value for "version" in common.toml would +be used here. + +If the "inherits" value is a directory (even for "..") then +"base.toml" in that directory will be inherited. + +For variants, the base pyproject.toml is the default value for "inherits". + + +### Inferred Values + +In some situations, omitted values will be inferred from other available +data (even for required fields). 
+ +* `project.name` <= `tool.pyperformance.name` +* `project.*` <= inherited metadata (except for "name" and "dynamic") +* `tool.pyperformance.name` <= metadata filename +* `tool.pyperformance.*` <= inherited metadata (except for "name" and "inherits") + +When the name is inferred from the filename for a regularly structured +benchmark, the "bm\_" prefix is removed from the benchmark's directory. +If it is a variant that prefix is removed from the metadata filename, +as well as the .toml suffix. + + +### The `[project]` Section + +| field | type | R | T | B | D | +|----------------------|-------|---|---|---|---| +| project.name | str | X | X | | | +| project.version | ver | X | | X | X | +| project.dependencies | [str] | | | X | | +| project.dynamic | [str] | | | | | + +"R": required +"T": inferred from the tool section +"B": inferred from the inherited metadata +"D": for default benchmarks, inferred from pyperformance + +"dynamic" is required by PEP 621 for when a field will be filled in +dynamically by the tool. This is especially important for required +fields. + +All other PEP 621 fields are optional (e.g. `requires-python = ">=3.8"`, +`{repository = "https://github.com/..."}`). + + +### The `[tool.pyperformance]` Section + +| field | type | R | B | F | +|-----------------|-------|---|---|---| +| tool.name | str | X | | X | +| tool.tags | [str] | | X | | +| tool.extra_opts | [str] | | X | | +| tool.inherits | file | | | | +| tool.runscript | file | | X | | +| tool.datadir | file | | X | | + +"R": required +"B": inferred from the inherited metadata +"F": inferred from filename + +* tags: optional list of names to group benchmarks +* extra_opts: optional list of args to pass to `tool.runscript` +* runscript: the benchmark script to use instead of run_benchmark.py. From 05bb86fb2e383dc731f35036b5078a531c8ef71c Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 5 Nov 2021 18:04:08 -0600 Subject: [PATCH 070/126] Fix some typos. --- pyperformance/_benchmark.py | 2 +- pyperformance/_manifest.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py index 21789092..3bfb83c7 100644 --- a/pyperformance/_benchmark.py +++ b/pyperformance/_benchmark.py @@ -68,7 +68,7 @@ def __init__(self, spec, metafile): self.metafile = metafile def __repr__(self): - return f'{type(self).__name__}(spec={self.spec}, metafile={self.metafile})' + return f'{type(self).__name__}(spec={self.spec!r}, metafile={self.metafile!r})' def __hash__(self): return hash(self.spec) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index e0a0b7d6..d4951864 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -82,7 +82,7 @@ def parse_manifest(text, *, resolve=None, filename=None): _check_groups(groups) if groups['default'] is None: - groups['default'] = [b.name for name in benchmarks or ()] + groups['default'] = [b.name for b in benchmarks or ()] # Fill in groups from benchmark tags. tags = {} @@ -183,7 +183,7 @@ def _parse_benchmarks(lines, resolve, filename): def _resolve_metafile(metafile, name, localdir): - if not metafile.startswith('<') and not metafile.endswith('>'): + if not metafile.startswith('<') or not metafile.endswith('>'): return metafile directive, _, extra = metafile[1:-1].partition(':') From 318720f41020b3922f523e4f92a7c3f6cf3acc1a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 5 Nov 2021 18:05:38 -0600 Subject: [PATCH 071/126] Print some diagnostic info on error. 
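Roughly the shape of the debugging aid (data made up for the example; the real change dumps the intermediate top/base/defaults/merged dicts with pprint before raising):

```
from pprint import pprint

merged = {'name': 'json_dumps'}  # illustrative: no "version" ended up merged in
if not merged.get('version'):
    # Show what was actually merged before failing loudly.
    print('====================')
    pprint(merged)
    print('====================')
    raise ValueError('missing benchmark version')
```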
--- pyperformance/_benchmark_metadata.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pyperformance/_benchmark_metadata.py b/pyperformance/_benchmark_metadata.py index 43a7f1a2..94273f67 100644 --- a/pyperformance/_benchmark_metadata.py +++ b/pyperformance/_benchmark_metadata.py @@ -80,6 +80,17 @@ def load_metadata(metafile, defaults=None): if not merged.get('name'): raise ValueError('missing benchmark name') if not merged.get('version'): + print('====================') + from pprint import pprint + print('top:') + pprint(top) + print('base:') + pprint(base) + print('defaults:') + pprint(defaults) + print('merged:') + pprint(merged) + print('====================') raise ValueError('missing benchmark version') metafile = merged.pop('metafile') From b51042b20d4d9061376f37bf4933311fe5264376 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 5 Nov 2021 18:07:16 -0600 Subject: [PATCH 072/126] Fall back to metadata for version. --- pyperformance/_benchmark.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py index 3bfb83c7..cd51dfe6 100644 --- a/pyperformance/_benchmark.py +++ b/pyperformance/_benchmark.py @@ -96,7 +96,10 @@ def name(self): @property def version(self): - return self.spec.version + version = self.spec.version + if version is None: + version = self._get_metadata_value('version', None) + return version @property def origin(self): @@ -113,8 +116,8 @@ def _get_rootdir(self): def _init_metadata(self): #assert self._metadata is None defaults = { - 'name': self.name, - 'version': self.version, + 'name': self.spec.name, + 'version': self.spec.version, } self._metadata, _ = _benchmark_metadata.load_metadata( self.metafile, From 5eeae8e4dd2c8078c395c9e3ea3a9f56a69252a7 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 5 Nov 2021 18:09:46 -0600 Subject: [PATCH 073/126] Add benchmarks to the default group instead of names. --- pyperformance/_manifest.py | 2 +- pyperformance/cli.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index d4951864..5faa459f 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -82,7 +82,7 @@ def parse_manifest(text, *, resolve=None, filename=None): _check_groups(groups) if groups['default'] is None: - groups['default'] = [b.name for b in benchmarks or ()] + groups['default'] = list(benchmarks or ()) # Fill in groups from benchmark tags. tags = {} diff --git a/pyperformance/cli.py b/pyperformance/cli.py index a23438a5..c2bc6a80 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -211,6 +211,7 @@ def _select_benchmarks(raw, manifest): def _main(): parser, options = parse_args() + manifest = benchmarks = None if hasattr(options, 'manifest'): # Load and update the manifest. manifest = _manifest.load_manifest(options.manifest) From 0dc395ae4d1c2b678a90e42ae83c3d191c98183e Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 5 Nov 2021 18:11:21 -0600 Subject: [PATCH 074/126] Install the requirements, even if the venv already exists. 
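The intended behaviour, sketched outside the pyperformance classes (paths and the helper name are made up): create the venv only when it is missing, but always reinstall the requirements so an existing venv picks up new dependencies.

```
import os.path
import subprocess
import sys
import venv

def ensure_venv(path, requirements):
    # Build the venv only if it does not exist yet.
    if not os.path.exists(path):
        venv.create(path, with_pip=True)
    bindir = 'Scripts' if sys.platform == 'win32' else 'bin'
    python = os.path.join(path, bindir, 'python')
    # Always (re)install, even when the venv already existed.
    subprocess.run([python, '-m', 'pip', 'install', '-r', requirements], check=True)
```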
--- pyperformance/venv.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 92a744f8..d0550de1 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -462,11 +462,12 @@ def _install_reqs(self): self.run_cmd(cmd) def create(self): + venv_path = self.get_path() if self.exists(): + print("Installing the virtual environment %s" % venv_path) + self._install_reqs() return - venv_path = self.get_path() - print("Creating the virtual environment %s" % venv_path) try: self._create_venv() From 6fb74036f0d03a88b38906ca246388e5e9faa30f Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 8 Nov 2021 09:20:16 -0700 Subject: [PATCH 075/126] Only re-install reqs for benchmark venvs. --- pyperformance/venv.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index d0550de1..48837419 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -464,8 +464,9 @@ def _install_reqs(self): def create(self): venv_path = self.get_path() if self.exists(): - print("Installing the virtual environment %s" % venv_path) - self._install_reqs() + if self.bench: + print("Installing the virtual environment %s" % venv_path) + self._install_reqs() return print("Creating the virtual environment %s" % venv_path) From 4979d5b3a77ae9378a7aa207a955835787837dcb Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 8 Nov 2021 20:36:19 -0700 Subject: [PATCH 076/126] Support an "includes" section in the manifest. --- BENCHMARKS_FORMAT.md | 14 +- pyperformance/_benchmark_selections.py | 4 +- pyperformance/_manifest.py | 473 +++++++++++++++++-------- pyperformance/cli.py | 4 +- pyperformance/cli_run.py | 6 +- 5 files changed, 337 insertions(+), 164 deletions(-) diff --git a/BENCHMARKS_FORMAT.md b/BENCHMARKS_FORMAT.md index 292d15ed..5d36de58 100644 --- a/BENCHMARKS_FORMAT.md +++ b/BENCHMARKS_FORMAT.md @@ -194,9 +194,17 @@ manifests = ["...", "..."] ### Merging Manifests -Note that the `pyperformance` CLI does not support passing multiple -manifests. Instead you must merge the files manually into another file -and pass that. +To combine manifests, use the `[includes]` section in the manifest: + +``` +[includes] +project1/benchmarks/MANIFEST +project2/benchmarks/MANIFEST + +``` + +Note that `` is the same as including the manifest file +for the default pyperformance benchmarks. 
## Benchmark Metadata Files diff --git a/pyperformance/_benchmark_selections.py b/pyperformance/_benchmark_selections.py index ec24a788..f4659afd 100644 --- a/pyperformance/_benchmark_selections.py +++ b/pyperformance/_benchmark_selections.py @@ -91,12 +91,14 @@ def _match_selection(manifest, kind, parsed, byname): for group in manifest.groups: if match_tag(group): groups.append(group) + elif parsed in ('all', 'default'): + groups.append(parsed) elif parsed in manifest.groups: groups.append(parsed) else: raise ValueError(f'unsupported selection {parsed!r}') for group in groups: - yield from _manifest.expand_benchmark_groups(group, manifest.groups) + yield from manifest.resolve_group(group) elif kind == 'name': if callable(parsed): match_bench = parsed diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 5faa459f..00dc79ee 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -3,7 +3,6 @@ 'BenchmarksManifest', 'load_manifest', 'parse_manifest', - 'expand_benchmark_groups', ] @@ -22,108 +21,210 @@ BENCH_HEADER = '\t'.join(BENCH_COLUMNS) -BenchmarksManifest = namedtuple('BenchmarksManifest', 'benchmarks groups') - - def load_manifest(filename, *, resolve=None): if not filename: filename = DEFAULT_MANIFEST - else: - filename = os.path.abspath(filename) - if resolve is None: - if filename == DEFAULT_MANIFEST: - def resolve(bench): - if isinstance(bench, _benchmark.Benchmark): - spec = bench.spec - else: - spec = bench - bench = _benchmark.Benchmark(spec, '') - bench.metafile = None - - if not spec.version: - spec = spec._replace(version=__version__) - if not spec.origin: - spec = spec._replace(origin='') - bench.spec = spec - - if not bench.metafile: - metafile = os.path.join(DEFAULTS_DIR, - f'bm_{bench.name}', - 'pyproject.toml') - bench.metafile = metafile - return bench - with open(filename) as infile: - return parse_manifest(infile, resolve=resolve, filename=filename) + sections = _parse_manifest_file(filename) + return BenchmarksManifest._from_sections(sections, resolve, filename) -def parse_manifest(text, *, resolve=None, filename=None): - if isinstance(text, str): - lines = text.splitlines() +def parse_manifest(lines, *, resolve=None, filename=None): + if isinstance(lines, str): + lines = lines.splitlines() else: - lines = iter(text) if not filename: # Try getting the filename from a file. - filename = getattr(text, 'name', None) + filename = getattr(lines, 'name', None) + sections = _parse_manifest(lines, filename) + return BenchmarksManifest._from_sections(sections, resolve, filename) - benchmarks = None - groups = {'default': None} - for section, seclines in _iter_sections(lines): - if section == 'benchmarks': - benchmarks = _parse_benchmarks(seclines, resolve, filename) - elif benchmarks is None: - raise ValueError('invalid manifest file, expected "benchmarks" section') - elif section == 'groups': - for group in seclines: - _utils.check_name(group) - groups.setdefault(group, None) - elif section.startswith('group '): - _, _, group = section.partition(' ') - groups[group] = _parse_group(group, seclines, benchmarks) - _check_groups(groups) - if groups['default'] is None: - groups['default'] = list(benchmarks or ()) - - # Fill in groups from benchmark tags. - tags = {} - for bench in benchmarks or (): - for tag in getattr(bench, 'tags', ()): - if tag in tags: - tags[tag].append(bench) - else: - tags[tag] = [bench] - tags.pop('default', None) # "default" is manifest-specific. 
- if list(groups) == ['default']: - groups.update(tags) +def resolve_default_benchmark(bench): + if isinstance(bench, _benchmark.Benchmark): + spec = bench.spec else: - for group in groups: - if groups[group] is None: - groups[group] = tags.get(group) - - # XXX Update tags for each benchmark with member groups. - return BenchmarksManifest(benchmarks, groups) + spec = bench + bench = _benchmark.Benchmark(spec, '') + bench.metafile = None + + if not spec.version: + spec = spec._replace(version=__version__) + if not spec.origin: + spec = spec._replace(origin='') + bench.spec = spec + + if not bench.metafile: + metafile = os.path.join(DEFAULTS_DIR, + f'bm_{bench.name}', + 'pyproject.toml') + bench.metafile = metafile + return bench + + +class BenchmarksManifest: + + @classmethod + def _from_sections(cls, sections, resolve=None, filename=None): + self = cls(filename=filename) + self._add_sections(sections, resolve) + return self + + def __init__(self, benchmarks=None, groups=None, filename=None): + self._raw_benchmarks = [] + # XXX Support disabling all groups (except all and default)? + self._raw_groups = {} + self._raw_filename = filename + self._byname = {} + self._groups = None + self._tags = None + + if benchmarks: + self._add_benchmarks(benchmarks) + if groups: + self._add_groups(groups) + + def __repr__(self): + args = (f'{n}={getattr(self, "_raw_" + n)}' + for n in ('benchmarks', 'groups', 'filename')) + return f'{type(self).__name__}({", ".join(args)})' + + @property + def benchmarks(self): + return list(self._byname.values()) + + @property + def groups(self): + names = self._custom_groups() + if not names: + names = set(self._get_tags()) + return names + + @property + def filename(self): + return self._raw_filename + + def _add_sections(self, sections, resolve): + filename = self._raw_filename + _resolve = resolve + if resolve is None and filename == DEFAULT_MANIFEST: + _resolve = default_resolve = resolve_default_benchmark + sections_seen = {filename: set()} + lastfile = None + for filename, section, data in sections: + if filename != lastfile: + _resolve = resolve + if _resolve is None and filename == DEFAULT_MANIFEST: + _resolve = resolve_default_benchmark + lastfile = filename + + if filename not in sections_seen: + sections_seen[filename] = {section} + elif section in sections_seen[filename]: + # For now each section can only show up once. + raise NotImplementedError((section, data)) + else: + sections_seen[filename].add(section) + + if section == 'includes': + pass + elif section == 'benchmarks': + entries = ((s, m, filename) for s, m in data) + self._add_benchmarks(entries, _resolve) + elif section == 'groups': + for name in data: + self._add_group(name, None) + elif section == 'group': + name, entries = data + self._add_group(name, entries) + else: + raise NotImplementedError((section, data)) + def _add_benchmarks(self, entries, resolve): + for spec, metafile, filename in entries: + # XXX Ignore duplicates? 
+ self._add_benchmark(spec, metafile, resolve, filename) -def expand_benchmark_groups(bench, groups): - if isinstance(bench, str): - spec, metafile = _benchmark.parse_benchmark(bench) + def _add_benchmark(self, spec, metafile, resolve, filename): + if spec.name in self._raw_groups: + raise ValueError(f'a group and a benchmark have the same name ({spec.name})') if metafile: + if filename: + localdir = os.path.dirname(filename) + metafile = os.path.join(localdir, metafile) bench = _benchmark.Benchmark(spec, metafile) else: + metafile = None bench = spec - elif isinstance(bench, _benchmark.Benchmark): - spec = bench.spec - else: - spec = bench - - if not groups: - yield bench - elif bench.name not in groups: - yield bench - else: - benchmarks = groups[bench.name] - for bench in benchmarks or (): - yield from expand_benchmark_groups(bench, groups) + self._raw_benchmarks.append((spec, metafile, filename)) + if resolve is not None: + bench = resolve(bench) + self._byname[bench.name] = bench + self._groups = None # Force re-resolution. + self._tags = None # Force re-resolution. + + def _add_group(self, name, entries): + if name in self._byname: + raise ValueError(f'a group and a benchmark have the same name ({name})') + if name == 'all': + # XXX Emit a warning? + return + if entries: + raw = self._raw_groups.get(name) + if raw is None: + raw = self._raw_groups[name] = list(entries) if entries else None + elif entries is not None: + raw.extend(entries) + elif name in self._raw_groups: + return + else: + self._raw_groups[name] = None + self._groups = None # Force re-resolution. + + def _custom_groups(self): + return set(self._raw_groups) - {'all', 'default'} + + def _get_tags(self): + if self._tags is None: + self._tags = _get_tags(self._byname.values()) + self._tags.pop('all', None) # It is manifest-specific. + self._tags.pop('default', None) # It is manifest-specific. 
+ return self._tags + + def _resolve_groups(self): + if self._groups is not None: + return self._groups + + raw = {} + for name, entries in self._raw_groups.items(): + if entries and entries[0][0] == '-': + entries = list(entries) + entries.insert(0, ('+', '')) + raw[name] = entries + self._groups = _resolve_groups(raw, self._byname) + return self._groups + + def resolve_group(self, name, *, fail=True): + if name == 'all': + benchmarks = self._byname.values() + elif name == 'default': + if 'default' not in self._raw_groups: + benchmarks = self._byname.values() + else: + groups = self._resolve_groups() + benchmarks = groups.get(name) + elif not self._custom_groups(): + benchmarks = self._get_tags().get(name) + if benchmarks is None and fail: + raise KeyError(name) + else: + groups = self._resolve_groups() + benchmarks = groups.get(name) + if not benchmarks: + if name in (set(self._raw_groups) - {'default'}): + benchmarks = self._get_tags().get(name, ()) + elif fail: + raise KeyError(name) + yield from benchmarks or () ####################################### @@ -153,93 +254,157 @@ def _iter_sections(lines): raise ValueError('invalid manifest file, no sections found') -def _parse_benchmarks(lines, resolve, filename): +def _parse_manifest_file(filename): + filename = os.path.abspath(filename) + with open(filename) as infile: + yield from _parse_manifest(infile, filename) + + +def _parse_manifest(lines, filename): + for section, seclines in _iter_sections(lines): + if section == 'includes': + yield filename, section, list(seclines) + for line in seclines: + if line == '': + line = DEFAULT_MANIFEST + yield from _parse_manifest_file(line) + elif section == 'benchmarks': + yield filename, section, list(_parse_benchmarks_section(seclines)) + elif section == 'groups': + yield filename, section, list(_parse_groups_section(seclines)) + elif section.startswith('group '): + section, _, group = section.partition(' ') + entries = list(_parse_group_section(seclines)) + yield filename, section, (group, entries) + else: + raise ValueError(f'unsupported section {section!r}') + + +def _parse_benchmarks_section(lines): if not lines: lines = [''] lines = iter(lines) if next(lines) != BENCH_HEADER: raise ValueError('invalid manifest file, expected benchmarks table header') - localdir = os.path.dirname(filename) - - benchmarks = [] version = origin = None for line in lines: try: name, metafile = (None if l == '-' else l - for l in line.split('\t')) + for l in line.split('\t')) except ValueError: raise ValueError(f'bad benchmark line {line!r}') spec = _benchmark.BenchmarkSpec(name or None, version, origin) - if metafile: - metafile = _resolve_metafile(metafile, name, localdir) - bench = _benchmark.Benchmark(spec, metafile) - else: - bench = spec - if resolve is not None: - bench = resolve(bench) - benchmarks.append(bench) - return benchmarks - - -def _resolve_metafile(metafile, name, localdir): - if not metafile.startswith('<') or not metafile.endswith('>'): - return metafile - - directive, _, extra = metafile[1:-1].partition(':') - if directive == 'local': - if extra: - rootdir = f'bm_{extra}' - basename = f'bm_{name}.toml' + metafile = _parse_metafile(metafile, name) + yield spec, metafile + + +def _parse_metafile(metafile, name): + if not metafile: + return None + elif metafile.startswith('<') and metafile.endswith('>'): + directive, _, extra = metafile[1:-1].partition(':') + if directive == 'local': + if extra: + rootdir = f'bm_{extra}' + basename = f'bm_{name}.toml' + else: + rootdir = f'bm_{name}' + basename 
= 'pyproject.toml' + # A relative path will be resolved against the manifset file. + return os.path.join(rootdir, basename) else: - rootdir = f'bm_{name}' - basename = 'pyproject.toml' - return os.path.join(localdir, rootdir, basename) + raise ValueError(f'unsupported metafile directive {metafile!r}') else: - raise ValueError(f'unsupported metafile directive {metafile!r}') + return os.path.abspath(metafile) + +def _parse_groups_section(lines): + for name in seclines: + _utils.check_name(name) + yield name -def _parse_group(name, lines, benchmarks): - byname = {b.name: b for b in benchmarks} - if name in byname: - raise ValueError(f'a group and a benchmark have the same name ({name})') - group = [] - seen = set() - unresolved = 0 +def _parse_group_section(lines): + yielded = False for line in lines: if line.startswith('-'): - # Exclude a benchmark. - if unresolved: - raise NotImplementedError(line) - if not group: - group.extend(benchmarks) - excluded = line[1:] - _benchmark.check_name(excluded) - try: - bench = byname[excluded] - except KeyError: - raise NotImplementedError(line) - if bench in group: - group.remove(bench) + # Exclude a benchmark or group. + op = '-' + name = line[1:] + elif line.startswith('+'): + op = '+' + name = line[1:] + else: + name = line + _benchmark.check_name(name) + yield op, name + yielded = True + + +def _get_tags(benchmarks): + # Fill in groups from benchmark tags. + tags = {} + for bench in benchmarks: + for tag in getattr(bench, 'tags', ()): + if tag in tags: + tags[tag].append(bench) + else: + tags[tag] = [bench] + return tags + + +def _resolve_groups(rawgroups, byname): + benchmarks = set(byname.values()) + tags = None + groups = { + 'all': list(benchmarks), + } + unresolved = {} + for groupname, entries in rawgroups.items(): + if groupname == 'all': continue - benchname = line - _benchmark.check_name(benchname) - if benchname in seen: + if not entries: + if groupname == 'default': + groups[groupname] = list(benchmarks) + else: + if tags is None: + tags = _get_tags(benchmarks) + groups[groupname] = tags.get(groupname, ()) continue - if benchname in byname: - group.append(byname[benchname]) - else: - # It may be a group. We check later. 
- group.append(benchname) - unresolved += 1 - return group - - -def _check_groups(groups): - for group, benchmarks in groups.items(): - for bench in benchmarks or (): - if not isinstance(bench, str): - continue - elif bench not in groups: - raise ValueError(f'unknown benchmark {name!r} (in group {group!r})') + assert entries[0][0] == '+', (groupname, entries) + unresolved[groupname] = names = set() + for op, name in entries: + if op == '+': + if name == '': + names.update(byname) + elif name in byname or name in rawgroups: + names.add(name) + elif op == '-': + if name == '': + raise NotImplementedError((groupname, op, name)) + elif name in byname or name in rawgroups: + if name in names: + names.remove(name) + else: + raise NotImplementedError((groupname, op, name)) + while unresolved: + for groupname, names in list(unresolved.items()): + benchmarks = set() + for name in names: + if name in byname: + benchmarks.add(byname[name]) + elif name in groups: + benchmarks.update(groups[name]) + names.remove(name) + elif name == groupname: + names.remove(name) + break + else: # name in unresolved + names.remove(name) + names.extend(unresolved[name]) + break + else: + groups[groupname] = benchmarks + del unresolved[groupname] + return groups diff --git a/pyperformance/cli.py b/pyperformance/cli.py index c2bc6a80..157b6a38 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -215,8 +215,8 @@ def _main(): if hasattr(options, 'manifest'): # Load and update the manifest. manifest = _manifest.load_manifest(options.manifest) - if 'all' not in manifest.groups: - manifest.groups['all'] = list(manifest.benchmarks) +# if 'all' not in manifest.groups: +# manifest.groups['all'] = list(manifest.benchmarks) if hasattr(options, 'benchmarks'): benchmarks = _select_benchmarks(options.benchmarks, manifest) diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index 6c53f4cd..65748566 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -59,12 +59,10 @@ def cmd_list_groups(manifest): all_benchmarks = set(manifest.benchmarks) groups = sorted(manifest.groups) - groups.remove('all') - groups.remove('default') groups[0:0] = ['all', 'default'] for group in groups: - specs = manifest.groups[group] - known = set(specs or ()) & all_benchmarks + specs = list(manifest.resolve_group(group)) + known = set(specs) & all_benchmarks if not known: # skip empty groups continue From 7f7b5716ce558ba083f0a06d5e506793596ad815 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 8 Nov 2021 20:43:43 -0700 Subject: [PATCH 077/126] doc fixes --- BENCHMARKS_FORMAT.md | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/BENCHMARKS_FORMAT.md b/BENCHMARKS_FORMAT.md index 5d36de58..ee4dd558 100644 --- a/BENCHMARKS_FORMAT.md +++ b/BENCHMARKS_FORMAT.md @@ -9,9 +9,10 @@ For each benchmark, there are two required files and several optional ones. Those files are expected to be in a specific directory structure (unless customized in the metadata). -The structure (see below) is such that it's much easier to maintain +The structure (see below) is such that it's easy to maintain a benchmark (or set of benchmarks) on GitHub and distribute it on PyPI. It also simplifies publishing a Python project's benchmarks. +The alternative is pointing people at a repo. Benchmarks can inherit metadata from other metadata files. 
This is useful for keeping common metadata for a set of benchmarks @@ -54,18 +55,19 @@ When a benchmark has variants, each has its own metadata file next to the normal "pyproject.toml", named "bm_NAME.toml". (Note the "bm\_" prefix.) The format of variant metadata files is exactly the same. `pyperformance` treats them the same, except that the sibling "pyproject.toml" is -inherited by default). +inherited by default. ## Manifest Files A manifest file identifies a set of benchmarks, as well as (optionally) how they should be grouped. `pyperformance` uses the manifest to -determine which benchmarks are available to run (and thus which to run). +determine which benchmarks are available to run (and thus which to run +by default). A manifest normally looks like this: -```` +``` [benchmarks] name metafile @@ -140,13 +142,28 @@ bench3 anotherdir/mybench.toml This means by default only "bench2" and "bench3" are run. +### Merging Manifests + +To combine manifests, use the `[includes]` section in the manifest: + +``` +[includes] +project1/benchmarks/MANIFEST +project2/benchmarks/MANIFEST + +``` + +Note that `` is the same as including the manifest file +for the default pyperformance benchmarks. + + ### A Local Benchmark Suite Often a project will have more than one benchmark that it will treat as a suite. `pyperformance` handles this without any extra work. In the dirctory holding the manifest file put all the benchmarks. Then -you will `` in the "metafile" column, like this: +put `` in the "metafile" column, like this: ``` [benchmarks] @@ -191,20 +208,7 @@ manifest = "..." manifests = ["...", "..."] ``` - -### Merging Manifests - -To combine manifests, use the `[includes]` section in the manifest: - -``` -[includes] -project1/benchmarks/MANIFEST -project2/benchmarks/MANIFEST - -``` - -Note that `` is the same as including the manifest file -for the default pyperformance benchmarks. +(Reminder: that is the pyproject.toml, not the manifest file.) ## Benchmark Metadata Files From 0addec2bb1be2e1f832ac1178f0b895a9bb0dadb Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 9 Nov 2021 09:21:01 -0700 Subject: [PATCH 078/126] "all" and "default" are always valid groups. --- pyperformance/_manifest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 00dc79ee..84b4465a 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -97,7 +97,7 @@ def groups(self): names = self._custom_groups() if not names: names = set(self._get_tags()) - return names + return names | {'all', 'default'} @property def filename(self): From 57e70708f7696227b936647cc86965974da9f69f Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 9 Nov 2021 10:44:14 -0700 Subject: [PATCH 079/126] Do not import pyperformance._manifest unless already installed. --- pyperformance/cli.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 157b6a38..a1aa3e5d 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -3,7 +3,7 @@ import os.path import sys -from pyperformance import _utils, _manifest, _benchmark_selections +from pyperformance import _utils from pyperformance.venv import exec_in_virtualenv, cmd_venv @@ -184,6 +184,8 @@ def parse_args(): def _select_benchmarks(raw, manifest): + from pyperformance import _benchmark_selections + # Get the raw list of benchmarks. 
entries = raw.lower() parse_entry = (lambda o, s: _benchmark_selections.parse_selection(s, op=o)) @@ -213,6 +215,7 @@ def _main(): manifest = benchmarks = None if hasattr(options, 'manifest'): + from pyperformance import _manifest # Load and update the manifest. manifest = _manifest.load_manifest(options.manifest) # if 'all' not in manifest.groups: From c55d6905e37b77ea5893ca33aa753a0792bb7de0 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 9 Nov 2021 12:50:05 -0700 Subject: [PATCH 080/126] Ensure we run in a venv when needed. --- pyperformance/cli.py | 72 ++++++++++++++++++++++++++++++-------------- pyperformance/run.py | 2 +- 2 files changed, 51 insertions(+), 23 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index a1aa3e5d..e83bf69d 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -1,4 +1,5 @@ import argparse +import contextlib import logging import os.path import sys @@ -136,9 +137,18 @@ def parse_args(): # venv cmd = subparsers.add_parser('venv', help='Actions on the virtual environment') - cmd.add_argument("venv_action", nargs="?", - choices=('show', 'create', 'recreate', 'remove'), - default='show') + cmd.set_defaults(venv_action='show') + cmds.append(cmd) + venvsubs = cmd.add_subparsers(dest="venv_action") + cmd = venvsubs.add_parser('show') + cmds.append(cmd) + cmd = venvsubs.add_parser('create') + filter_opts(cmd) + cmds.append(cmd) + cmd = venvsubs.add_parser('recreate') + filter_opts(cmd) + cmds.append(cmd) + cmd = venvsubs.add_parser('remove') cmds.append(cmd) for cmd in cmds: @@ -157,7 +167,6 @@ def parse_args(): default=sys.executable) cmd.add_argument("--venv", help="Path to the virtual environment") - filter_opts(cmd) options = parser.parse_args() @@ -183,6 +192,29 @@ def parse_args(): return (parser, options) +@contextlib.contextmanager +def _might_need_venv(options): + try: + yield + except ModuleNotFoundError: + if not options.inside_venv: + print('switching to a venv.') + exec_in_virtualenv(options) + raise # re-raise + + +def _manifest_from_options(options): + from pyperformance import _manifest + return _manifest.load_manifest(options.manifest) + + +def _benchmarks_from_options(options): + if not getattr(options, 'benchmarks', None): + return None + manifest = _manifest_from_options(options) + return _select_benchmarks(options.benchmarks, manifest) + + def _select_benchmarks(raw, manifest): from pyperformance import _benchmark_selections @@ -213,17 +245,9 @@ def _select_benchmarks(raw, manifest): def _main(): parser, options = parse_args() - manifest = benchmarks = None - if hasattr(options, 'manifest'): - from pyperformance import _manifest - # Load and update the manifest. 
- manifest = _manifest.load_manifest(options.manifest) -# if 'all' not in manifest.groups: -# manifest.groups['all'] = list(manifest.benchmarks) - if hasattr(options, 'benchmarks'): - benchmarks = _select_benchmarks(options.benchmarks, manifest) - if options.action == 'venv': + with _might_need_venv(options): + benchmarks = _benchmarks_from_options(options) cmd_venv(options, benchmarks) sys.exit() elif options.action == 'compile': @@ -242,20 +266,24 @@ def _main(): from pyperformance.compare import cmd_show cmd_show(options) sys.exit() - - if not options.inside_venv: - exec_in_virtualenv(options) - - from pyperformance.cli_run import cmd_run, cmd_list, cmd_list_groups - - if options.action == 'run': + elif options.action == 'run': + with _might_need_venv(options): + from pyperformance.cli_run import cmd_run + benchmarks = _benchmarks_from_options(options) cmd_run(options, benchmarks) elif options.action == 'compare': - from pyperformance.compare import cmd_compare + with _might_need_venv(options): + from pyperformance.compare import cmd_compare cmd_compare(options) elif options.action == 'list': + with _might_need_venv(options): + from pyperformance.cli_run import cmd_list + benchmarks = _benchmarks_from_options(options) cmd_list(options, benchmarks) elif options.action == 'list_groups': + with _might_need_venv(options): + from pyperformance.cli_run import cmd_list_groups + manifest = _manifest_from_options(options) cmd_list_groups(manifest) else: parser.print_help() diff --git a/pyperformance/run.py b/pyperformance/run.py index c640300e..35e46802 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -8,7 +8,6 @@ except ImportError: multiprocessing = None -import pyperf import pyperformance from . import _utils, _pythoninfo from . import venv as _venv @@ -73,6 +72,7 @@ def run_benchmarks(should_run, python, options): pyperf_opts = get_pyperf_opts(options) + import pyperf for index, bench in enumerate(to_run): name = bench.name print("[%s/%s] %s..." % From 01697d55583395545e6184ece35f39f295aa22ff Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 9 Nov 2021 13:07:10 -0700 Subject: [PATCH 081/126] Do not re-install the shared venv. --- pyperformance/run.py | 4 +++- pyperformance/venv.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pyperformance/run.py b/pyperformance/run.py index 35e46802..a5b663a2 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -59,11 +59,13 @@ def run_benchmarks(should_run, python, options): runid = get_run_id(python) benchmarks = {} + venvs = set() for bench in to_run: bench_runid = runid._replace(bench=bench) venv = _venv.VirtualEnvironment(options, bench, bench_runid.name, usebase=True) - venv.create() + venv.create(refresh=venv.get_path() not in venvs) + venvs.add(venv.get_path()) benchmarks[bench] = (venv, bench_runid) suite = None diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 48837419..49284be2 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -461,10 +461,10 @@ def _install_reqs(self): cmd = pip_program + ['freeze'] self.run_cmd(cmd) - def create(self): + def create(self, refresh=True): venv_path = self.get_path() if self.exists(): - if self.bench: + if refresh: print("Installing the virtual environment %s" % venv_path) self._install_reqs() return From 94ac28a10bf3adbd22f3e58211379b8cdacfcff7 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 9 Nov 2021 15:03:39 -0700 Subject: [PATCH 082/126] Only list the "all" and "default" groups once. 
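The listing now pins those two names to the front and sorts only the remaining group names, so neither shows up a second time. In isolation (group names invented for the example):

```
groups = {'all', 'default', 'serialize', 'math', 'apps'}
# Fixed entries first, then the rest in sorted order, with no duplicates.
ordered = ['all', 'default'] + sorted(groups - {'all', 'default'})
print(ordered)  # ['all', 'default', 'apps', 'math', 'serialize']
```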
--- pyperformance/_benchmark_selections.py | 2 -- pyperformance/cli_run.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/pyperformance/_benchmark_selections.py b/pyperformance/_benchmark_selections.py index f4659afd..194f856d 100644 --- a/pyperformance/_benchmark_selections.py +++ b/pyperformance/_benchmark_selections.py @@ -91,8 +91,6 @@ def _match_selection(manifest, kind, parsed, byname): for group in manifest.groups: if match_tag(group): groups.append(group) - elif parsed in ('all', 'default'): - groups.append(parsed) elif parsed in manifest.groups: groups.append(parsed) else: diff --git a/pyperformance/cli_run.py b/pyperformance/cli_run.py index 65748566..cc317160 100644 --- a/pyperformance/cli_run.py +++ b/pyperformance/cli_run.py @@ -58,7 +58,7 @@ def cmd_list(options, benchmarks): def cmd_list_groups(manifest): all_benchmarks = set(manifest.benchmarks) - groups = sorted(manifest.groups) + groups = sorted(manifest.groups - {'all', 'default'}) groups[0:0] = ['all', 'default'] for group in groups: specs = list(manifest.resolve_group(group)) From f4b09bbbd08aabfabb7a943c60c23e52d8fd420b Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 9 Nov 2021 15:11:57 -0700 Subject: [PATCH 083/126] Add the manifest to the "compile" config. --- doc/benchmark.conf.sample | 3 +++ pyperformance/compile.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/doc/benchmark.conf.sample b/doc/benchmark.conf.sample index 3f4da356..ebf377e8 100644 --- a/doc/benchmark.conf.sample +++ b/doc/benchmark.conf.sample @@ -64,6 +64,9 @@ install = True # Run "sudo python3 -m pyperf system tune" before running benchmarks? system_tune = True +# --manifest option for 'pyperformance run' +manifest = + # --benchmarks option for 'pyperformance run' benchmarks = diff --git a/pyperformance/compile.py b/pyperformance/compile.py index 46fd15eb..0fe5b8e5 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -515,6 +515,8 @@ def run_benchmark(self): '--output', self.filename] if self.options.inherit_environ: cmd.append('--inherit-environ=%s' % ','.join(self.options.inherit_environ)) + if self.conf.manifest: + cmd.extend(('--manifest', self.conf.manifest)) if self.conf.benchmarks: cmd.extend(('--benchmarks', self.conf.benchmarks)) if self.conf.affinity: @@ -770,6 +772,7 @@ def getboolean(section, key, default): # [run_benchmark] conf.system_tune = getboolean('run_benchmark', 'system_tune', True) + conf.manifest = getstr('run_benchmark', 'manifest', default='') conf.benchmarks = getstr('run_benchmark', 'benchmarks', default='') conf.affinity = getstr('run_benchmark', 'affinity', default='') conf.upload = getboolean('run_benchmark', 'upload', False) From b56b25ada9259ac7bb37a7f996f60e64cd309dd0 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 9 Nov 2021 15:31:06 -0700 Subject: [PATCH 084/126] Adjust the stdlib_dir check. 
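For context, the venv detection itself is still the prefix comparison; what this adjusts is the expected stdlib location, which is computed differently for a Windows CPython install than for a POSIX one. A stand-alone reminder of the prefix check (not the pyperformance code):

```
import sys

def looks_like_venv():
    # In a venv, sys.prefix points at the venv while base_prefix points
    # at the underlying installation.
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)

print(looks_like_venv())
```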
--- pyperformance/_pythoninfo.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pyperformance/_pythoninfo.py b/pyperformance/_pythoninfo.py index 54b04a54..7e6eb28a 100644 --- a/pyperformance/_pythoninfo.py +++ b/pyperformance/_pythoninfo.py @@ -92,8 +92,10 @@ def inspect_python_install(python=sys.executable): MAGIC_NUMBER = _imp.get_magic() -def _inspect_python_install(executable, prefix, base_prefix, platlibdir, - stdlib_dir, version_info, **_ignored): +def _inspect_python_install(executable, prefix, base_prefix, + platlibdir, stdlib_dir, + version_info, platform, implementation_name, + **_ignored): is_venv = prefix != base_prefix if os.path.basename(stdlib_dir) == 'Lib': @@ -118,8 +120,11 @@ def _inspect_python_install(executable, prefix, base_prefix, platlibdir, # XXX This is good enough for now. base_executable = executable #raise NotImplementedError(stdlib_dir) - else: - expected = os.path.join(prefix, platlibdir, python) + elif implementation_name == 'cpython': + if platform == 'win32': + expected = os.path.join(prefix, platlibdir) + else: + expected = os.path.join(prefix, platlibdir, python) if stdlib_dir == expected: base_executable = executable else: From f8338aef1f96b453607677b5c6054c7433c7e335 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 9 Nov 2021 15:36:09 -0700 Subject: [PATCH 085/126] Be sure to set base_executable. --- pyperformance/_pythoninfo.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyperformance/_pythoninfo.py b/pyperformance/_pythoninfo.py index 7e6eb28a..69205b98 100644 --- a/pyperformance/_pythoninfo.py +++ b/pyperformance/_pythoninfo.py @@ -129,6 +129,8 @@ def _inspect_python_install(executable, prefix, base_prefix, base_executable = executable else: raise NotImplementedError(stdlib_dir) + else: + base_executable = executable is_dev = False return base_executable, is_dev, is_venv From dd4597bae468c5b0774d32c551dbc3de12ed6108 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 11:41:37 -0700 Subject: [PATCH 086/126] Use the --venv opt to the "compile" command. --- pyperformance/compile.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index 0fe5b8e5..15678c4a 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -956,6 +956,8 @@ def cmd_compile(options): conf.update = False if options.no_tune: conf.system_tune = False + if options.venv: + conf.venv = options.venv bench = BenchmarkRevision(conf, options.revision, options.branch, patch=options.patch, options=options) bench.main() From e22eeb87ba3e365df52fd1575b3dc8194a27d7e1 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 11:54:32 -0700 Subject: [PATCH 087/126] Separate the logic for create vs. recreate. 
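Reduced to its essentials (a sketch using plain directories instead of the VirtualEnvironment class): "create" leaves an existing venv alone, while "recreate" always wipes it first and builds it again.

```
import os.path
import shutil
import venv

def cmd_create(path):
    # Only build the venv if it is not already there.
    if not os.path.exists(path):
        venv.create(path, with_pip=True)

def cmd_recreate(path):
    # Throw away whatever is there and start over.
    if os.path.exists(path):
        shutil.rmtree(path)
    venv.create(path, with_pip=True)
```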
--- pyperformance/venv.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 49284be2..b8011794 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -506,21 +506,22 @@ def cmd_venv(options, benchmarks=None): venv = VirtualEnvironment(options, requirements=requirements) venv_path = venv.get_path() - if action in ('create', 'recreate'): - recreated = False - if action == 'recreate' and venv.exists(): - recreated = True + if action == 'create': + if not venv.exists(): + venv.create() + print("The virtual environment %s has been created" % venv_path) + else: + print("The virtual environment %s already exists" % venv_path) + elif action == 'recreate': + if venv.exists(): shutil.rmtree(venv_path) print("The old virtual environment %s has been removed" % venv_path) print() - - if not venv.exists(): venv.create() - - what = 'recreated' if recreated else 'created' - print("The virtual environment %s has been %s" % (venv_path, what)) + print("The virtual environment %s has been recreated" % venv_path) else: - print("The virtual environment %s already exists" % venv_path) + venv.create() + print("The virtual environment %s has been created" % venv_path) elif action == 'remove': if os.path.exists(venv_path): From 6bb3a9e599db9c5089b5cefa8766ac03ee8fa9c9 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 11:56:50 -0700 Subject: [PATCH 088/126] Do not re-create the venv if already running in it. --- pyperformance/venv.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index b8011794..5c62027f 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -514,11 +514,14 @@ def cmd_venv(options, benchmarks=None): print("The virtual environment %s already exists" % venv_path) elif action == 'recreate': if venv.exists(): - shutil.rmtree(venv_path) - print("The old virtual environment %s has been removed" % venv_path) - print() - venv.create() - print("The virtual environment %s has been recreated" % venv_path) + if venv_path == sys.executable: + print("The virtual environment %s already exists" % venv_path) + else: + shutil.rmtree(venv_path) + print("The old virtual environment %s has been removed" % venv_path) + print() + venv.create() + print("The virtual environment %s has been recreated" % venv_path) else: venv.create() print("The virtual environment %s has been created" % venv_path) From 9c664ff477819512fa7d3968c14f643cf454e1f1 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 13:24:18 -0700 Subject: [PATCH 089/126] Ensure all requirements are always installed. 
--- pyperformance/venv.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 5c62027f..bc06a8b1 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -512,10 +512,12 @@ def cmd_venv(options, benchmarks=None): print("The virtual environment %s has been created" % venv_path) else: print("The virtual environment %s already exists" % venv_path) + venv.create() elif action == 'recreate': if venv.exists(): if venv_path == sys.executable: print("The virtual environment %s already exists" % venv_path) + venv.create() else: shutil.rmtree(venv_path) print("The old virtual environment %s has been removed" % venv_path) From 91c09d30aed02bcd1e77a546b69bc2e48e30da40 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 13:24:35 -0700 Subject: [PATCH 090/126] Do not buffer stdout during tests. --- runtests.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/runtests.py b/runtests.py index d9268d54..be2f508d 100755 --- a/runtests.py +++ b/runtests.py @@ -39,7 +39,7 @@ def run_bench(*cmd): cmd = cmd + ('--venv', venv) run_cmd(cmd) - run_bench(python, script, 'venv', 'create', '-b', 'all') + run_bench(python, '-u', script, 'venv', 'create', '-b', 'all') egg_info = "pyperformance.egg-info" print("Remove directory %s" % egg_info) @@ -48,7 +48,7 @@ def run_bench(*cmd): except FileNotFoundError: pass - run_bench(python, script, 'venv') + run_bench(python, '-u', script, 'venv') for filename in ( os.path.join('pyperformance', 'tests', 'data', 'py36.json'), @@ -56,8 +56,8 @@ def run_bench(*cmd): ): run_cmd((python, script, 'show', filename)) - run_bench(python, script, 'list') - run_bench(python, script, 'list_groups') + run_bench(python, '-u', script, 'list') + run_bench(python, '-u', script, 'list_groups') json = os.path.join(venv, 'bench.json') @@ -65,18 +65,18 @@ def run_bench(*cmd): # # --debug-single-value: benchmark results don't matter, we only # check that running benchmarks don't fail. - run_bench(python, script, 'run', '-b', 'all', '--debug-single-value', + run_bench(python, '-u', script, 'run', '-b', 'all', '--debug-single-value', '-o', json) # Display slowest benchmarks - run_cmd((venv_python, '-m', 'pyperf', 'slowest', json)) + run_cmd((venv_python, '-u', '-m', 'pyperf', 'slowest', json)) - run_bench(python, script, 'venv', 'remove') + run_bench(python, '-u', script, 'venv', 'remove') def main(): # Unit tests - cmd = [sys.executable, + cmd = [sys.executable, '-u', os.path.join('pyperformance', 'tests', 'test_compare.py')] run_cmd(cmd) From 7a339568a3009bfa93454745b7452f14ffc8b250 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 13:30:03 -0700 Subject: [PATCH 091/126] Fix a check. --- pyperformance/venv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index bc06a8b1..b225ba49 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -515,7 +515,7 @@ def cmd_venv(options, benchmarks=None): venv.create() elif action == 'recreate': if venv.exists(): - if venv_path == sys.executable: + if venv.get_python_program() == sys.executable: print("The virtual environment %s already exists" % venv_path) venv.create() else: From 62e2014563287446b96293746e89c48dcc42bb63 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 13:36:34 -0700 Subject: [PATCH 092/126] Do not buffer stdout during tests. 
--- runtests.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/runtests.py b/runtests.py index be2f508d..cc04f073 100755 --- a/runtests.py +++ b/runtests.py @@ -7,7 +7,7 @@ def run_cmd(cmd): - print("Execute: %s" % ' '.join(cmd)) + print("Execute: %s" % ' '.join(cmd), flush=True) proc = subprocess.Popen(cmd) try: proc.wait() @@ -15,10 +15,11 @@ def run_cmd(cmd): proc.kill() proc.wait() raise + sys.stdout.flush() exitcode = proc.returncode if exitcode: sys.exit(exitcode) - print("") + print("", flush=True) def run_tests(venv): @@ -42,7 +43,7 @@ def run_bench(*cmd): run_bench(python, '-u', script, 'venv', 'create', '-b', 'all') egg_info = "pyperformance.egg-info" - print("Remove directory %s" % egg_info) + print("Remove directory %s" % egg_info, flush=True) try: shutil.rmtree(egg_info) except FileNotFoundError: From d7ea256776772146b8eb5e5ac78ff985afd1ace9 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 14:12:52 -0700 Subject: [PATCH 093/126] Always switch to a venv if running out of the repo. --- pyperformance/__init__.py | 8 +++++++- pyperformance/cli.py | 7 ++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/pyperformance/__init__.py b/pyperformance/__init__.py index 9dd60a33..e77c9201 100644 --- a/pyperformance/__init__.py +++ b/pyperformance/__init__.py @@ -5,4 +5,10 @@ __version__ = '.'.join(map(str, VERSION)) -DATA_DIR = os.path.join(os.path.dirname(__file__), 'data-files') +PKG_ROOT = os.path.dirname(__file__) +DATA_DIR = os.path.join(PKG_ROOT, 'data-files') + + +def is_installed(): + parent = os.path.dirname(PKG_ROOT) + return os.path.exists(os.path.join(parent, 'setup.py')) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index e83bf69d..514d2107 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -4,7 +4,7 @@ import os.path import sys -from pyperformance import _utils +from pyperformance import _utils, is_installed from pyperformance.venv import exec_in_virtualenv, cmd_venv @@ -245,6 +245,11 @@ def _select_benchmarks(raw, manifest): def _main(): parser, options = parse_args() + if not is_installed(): + assert not options.inside_venv + print('switching to a venv.') + exec_in_virtualenv(options) + if options.action == 'venv': with _might_need_venv(options): benchmarks = _benchmarks_from_options(options) From 8ed6fd549909d5c3cdd6134c828b2dd6a9b1e6c6 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 14:26:06 -0700 Subject: [PATCH 094/126] Distinguish message from runtests.py. --- runtests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtests.py b/runtests.py index cc04f073..adcb4915 100755 --- a/runtests.py +++ b/runtests.py @@ -7,7 +7,7 @@ def run_cmd(cmd): - print("Execute: %s" % ' '.join(cmd), flush=True) + print("(runtests.py) Execute: %s" % ' '.join(cmd), flush=True) proc = subprocess.Popen(cmd) try: proc.wait() From ce6d09e334a2a15dd43dc4dcbbccf20fdb64027b Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 16:21:16 -0700 Subject: [PATCH 095/126] Pass values into VirtualEnvironment instead of the options object. 
--- pyperformance/run.py | 10 ++++++++-- pyperformance/venv.py | 27 +++++++++++++++++++-------- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/pyperformance/run.py b/pyperformance/run.py index a5b663a2..f23ad288 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -62,8 +62,14 @@ def run_benchmarks(should_run, python, options): venvs = set() for bench in to_run: bench_runid = runid._replace(bench=bench) - venv = _venv.VirtualEnvironment(options, bench, bench_runid.name, - usebase=True) + venv = _venv.VirtualEnvironment( + options.python, + options.venv, + inherit_environ=options.inherit_environ, + bench=bench, + name=bench_runid.name, + usebase=True, + ) venv.create(refresh=venv.get_path() not in venvs) venvs.add(venv.get_path()) benchmarks[bench] = (venv, bench_runid) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index b225ba49..0d3547eb 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -170,19 +170,21 @@ def download(url, filename): class VirtualEnvironment(object): - def __init__(self, options, bench=None, name=None, *, + def __init__(self, python, root=None, *, + inherit_environ=None, + bench=None, + name=None, requirements=None, usebase=False, ): - python = options.python if usebase: python, _, _ = _pythoninfo.inspect_python_install(python) - self.options = options self.python = python - self.bench = bench + self.inherit_environ = inherit_environ or None + self.bench = bench or None self._name = name or None - self._venv_path = options.venv + self._venv_path = root or None self._pip_program = None self._force_old_pip = False self.requirements = requirements @@ -213,7 +215,7 @@ def run_cmd_nocheck(self, cmd, verbose=True): sys.stdout.flush() sys.stderr.flush() - env = create_environ(self.options.inherit_environ) + env = create_environ(self.inherit_environ) try: proc = subprocess.Popen(cmd, env=env) except OSError as exc: @@ -480,7 +482,11 @@ def create(self, refresh=True): def exec_in_virtualenv(options): - venv = VirtualEnvironment(options) + venv = VirtualEnvironment( + options.python, + options.venv, + inherit_environ=options.inherit_environ, + ) venv.create() venv_python = venv.get_python_program() @@ -503,7 +509,12 @@ def cmd_venv(options, benchmarks=None): requirements = Requirements.from_benchmarks(benchmarks) - venv = VirtualEnvironment(options, requirements=requirements) + venv = VirtualEnvironment( + options.python, + options.venv, + inherit_environ=options.inherit_environ, + requirements=requirements, + ) venv_path = venv.get_path() if action == 'create': From 4048199ecd6be3dc4fdf70e2e0761b361611290f Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 16:21:41 -0700 Subject: [PATCH 096/126] Do not buffer stdout during tests. 
--- pyperformance/cli.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 514d2107..37bce54c 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -185,7 +185,7 @@ def parse_args(): abs_python = os.path.abspath(options.python) if not abs_python: print("ERROR: Unable to locate the Python executable: %r" % - options.python) + options.python, flush=True) sys.exit(1) options.python = abs_python @@ -198,7 +198,7 @@ def _might_need_venv(options): yield except ModuleNotFoundError: if not options.inside_venv: - print('switching to a venv.') + print('switching to a venv.', flush=True) exec_in_virtualenv(options) raise # re-raise @@ -247,7 +247,7 @@ def _main(): if not is_installed(): assert not options.inside_venv - print('switching to a venv.') + print('switching to a venv.', flush=True) exec_in_virtualenv(options) if options.action == 'venv': @@ -299,5 +299,5 @@ def main(): try: _main() except KeyboardInterrupt: - print("Benchmark suite interrupted: exit!") + print("Benchmark suite interrupted: exit!", flush=True) sys.exit(1) From df8cccf2cae3df1aecc82ecfa2cac53c5bda413e Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 16:38:47 -0700 Subject: [PATCH 097/126] Distinguish message from runtests.py. --- runtests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtests.py b/runtests.py index adcb4915..1993d1bd 100755 --- a/runtests.py +++ b/runtests.py @@ -43,7 +43,7 @@ def run_bench(*cmd): run_bench(python, '-u', script, 'venv', 'create', '-b', 'all') egg_info = "pyperformance.egg-info" - print("Remove directory %s" % egg_info, flush=True) + print("(runtests.py) Remove directory %s" % egg_info, flush=True) try: shutil.rmtree(egg_info) except FileNotFoundError: From 871963603c79b26f2816bddb7813961519d3b1cd Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 16:46:45 -0700 Subject: [PATCH 098/126] Print out the --venv option. 
--- pyperformance/cli.py | 1 + pyperformance/venv.py | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 37bce54c..04b2c358 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -253,6 +253,7 @@ def _main(): if options.action == 'venv': with _might_need_venv(options): benchmarks = _benchmarks_from_options(options) + print(1, options.venv) cmd_venv(options, benchmarks) sys.exit() elif options.action == 'compile': diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 0d3547eb..1454735f 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -509,13 +509,16 @@ def cmd_venv(options, benchmarks=None): requirements = Requirements.from_benchmarks(benchmarks) + print(2, options.venv) venv = VirtualEnvironment( options.python, options.venv, inherit_environ=options.inherit_environ, requirements=requirements, ) + print(3, options.venv) venv_path = venv.get_path() + print(4, venv_path) if action == 'create': if not venv.exists(): @@ -528,8 +531,10 @@ def cmd_venv(options, benchmarks=None): if venv.exists(): if venv.get_python_program() == sys.executable: print("The virtual environment %s already exists" % venv_path) + print("(it matches the currently running Python executable)") venv.create() else: + print("The virtual environment %s already exists" % venv_path) shutil.rmtree(venv_path) print("The old virtual environment %s has been removed" % venv_path) print() From 3c5d7da82857a874817ac00bb36a2fc6cc84a87f Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 16:52:14 -0700 Subject: [PATCH 099/126] Print out the --venv option. --- pyperformance/cli.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 04b2c358..41c56676 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -243,7 +243,9 @@ def _select_benchmarks(raw, manifest): def _main(): + print(sys.argv) parser, options = parse_args() + print(options) if not is_installed(): assert not options.inside_venv From 14761b2ceee5baacdaff53623f2c8f93ce6580f5 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 16:55:54 -0700 Subject: [PATCH 100/126] Print out the --venv option. --- pyperformance/cli.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 41c56676..12b65165 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -243,9 +243,7 @@ def _select_benchmarks(raw, manifest): def _main(): - print(sys.argv) parser, options = parse_args() - print(options) if not is_installed(): assert not options.inside_venv @@ -253,6 +251,8 @@ def _main(): exec_in_virtualenv(options) if options.action == 'venv': + print(sys.argv) + print(options) with _might_need_venv(options): benchmarks = _benchmarks_from_options(options) print(1, options.venv) From b975c51ed6ce013ce25680379a68a774fcd8d403 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 17:10:47 -0700 Subject: [PATCH 101/126] Do not add args directly to the "venv" command. 
--- pyperformance/cli.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 12b65165..8e2c1125 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -138,7 +138,6 @@ def parse_args(): cmd = subparsers.add_parser('venv', help='Actions on the virtual environment') cmd.set_defaults(venv_action='show') - cmds.append(cmd) venvsubs = cmd.add_subparsers(dest="venv_action") cmd = venvsubs.add_parser('show') cmds.append(cmd) From 062745bd80432500b8dcd3aff339007b1ad5c910 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 17:15:20 -0700 Subject: [PATCH 102/126] Be explicit about "create". --- runtests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtests.py b/runtests.py index 1993d1bd..5c044e62 100755 --- a/runtests.py +++ b/runtests.py @@ -49,7 +49,7 @@ def run_bench(*cmd): except FileNotFoundError: pass - run_bench(python, '-u', script, 'venv') + run_bench(python, '-u', script, 'venv', 'create') for filename in ( os.path.join('pyperformance', 'tests', 'data', 'py36.json'), From d9184292d10a0133f31f7f1ff180f145016203fb Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 15 Nov 2021 17:20:00 -0700 Subject: [PATCH 103/126] Drop debug messages. --- pyperformance/cli.py | 3 --- pyperformance/venv.py | 3 --- 2 files changed, 6 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 8e2c1125..9a8d037d 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -250,11 +250,8 @@ def _main(): exec_in_virtualenv(options) if options.action == 'venv': - print(sys.argv) - print(options) with _might_need_venv(options): benchmarks = _benchmarks_from_options(options) - print(1, options.venv) cmd_venv(options, benchmarks) sys.exit() elif options.action == 'compile': diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 1454735f..2b5931f9 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -509,16 +509,13 @@ def cmd_venv(options, benchmarks=None): requirements = Requirements.from_benchmarks(benchmarks) - print(2, options.venv) venv = VirtualEnvironment( options.python, options.venv, inherit_environ=options.inherit_environ, requirements=requirements, ) - print(3, options.venv) venv_path = venv.get_path() - print(4, venv_path) if action == 'create': if not venv.exists(): From 5842d4831827276ce403bde55418d1a48cbefbdf Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 09:15:22 -0700 Subject: [PATCH 104/126] Resolve the manifest file in the compile config. 
--- pyperformance/compile.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index 15678c4a..f9b325d8 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -744,6 +744,13 @@ def getstr(section, key, default=None): # strip spaces return value.strip() + def getfile(section, key, default=None): + value = getstr(section, key, default) + if not value: + return value + value = os.path.expanduser(value) + return value + def getboolean(section, key, default): try: sectionobj = cfgobj[section] @@ -752,19 +759,19 @@ def getboolean(section, key, default): return default # [config] - conf.json_dir = os.path.expanduser(getstr('config', 'json_dir')) + conf.json_dir = getfile('config', 'json_dir') conf.json_patch_dir = os.path.join(conf.json_dir, 'patch') conf.uploaded_json_dir = os.path.join(conf.json_dir, 'uploaded') conf.debug = getboolean('config', 'debug', False) if parse_compile: # [scm] - conf.repo_dir = os.path.expanduser(getstr('scm', 'repo_dir')) + conf.repo_dir = getfile('scm', 'repo_dir') conf.update = getboolean('scm', 'update', True) conf.git_remote = getstr('config', 'git_remote', default='remotes/origin') # [compile] - conf.directory = os.path.expanduser(getstr('compile', 'bench_dir')) + conf.directory = getfile('compile', 'bench_dir') conf.lto = getboolean('compile', 'lto', True) conf.pgo = getboolean('compile', 'pgo', True) conf.install = getboolean('compile', 'install', True) @@ -772,7 +779,7 @@ def getboolean(section, key, default): # [run_benchmark] conf.system_tune = getboolean('run_benchmark', 'system_tune', True) - conf.manifest = getstr('run_benchmark', 'manifest', default='') + conf.manifest = getfile('run_benchmark', 'manifest') conf.benchmarks = getstr('run_benchmark', 'benchmarks', default='') conf.affinity = getstr('run_benchmark', 'affinity', default='') conf.upload = getboolean('run_benchmark', 'upload', False) From f1f9db11e28bb5ddc7a98239130cc9cb9c0853dd Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 10:04:06 -0700 Subject: [PATCH 105/126] Add a "dryrun" mode for testing "compile". 
--- pyperformance/compile.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index f9b325d8..d8105d69 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -411,6 +411,10 @@ def compile_install(self): class BenchmarkRevision(Application): + + #_dryrun = False + _dryrun = True + def __init__(self, conf, revision, branch=None, patch=None, setup_log=True, filename=None, commit_date=None, options=None): @@ -496,8 +500,10 @@ def compile_install(self): def create_venv(self): # Create venv - cmd = [self.python.program, '-u', '-m', 'pyperformance', - 'venv', 'recreate'] + python = self.python.program + if self._dryrun: + python = sys.executable + cmd = [python, '-u', '-m', 'pyperformance', 'venv', 'recreate'] if self.conf.venv: cmd.extend(('--venv', self.conf.venv)) if self.options.inherit_environ: @@ -508,7 +514,10 @@ def create_venv(self): def run_benchmark(self): self.safe_makedirs(os.path.dirname(self.filename)) - cmd = [self.python.program, '-u', + python = self.python.program + if self._dryrun: + python = sys.executable + cmd = [python, '-u', '-m', 'pyperformance', 'run', '--verbose', @@ -675,10 +684,11 @@ def prepare(self): def compile_bench(self): self.python = Python(self, self.conf) - try: - self.compile_install() - except SystemExit: - sys.exit(EXIT_COMPILE_ERROR) + if not self._dryrun: + try: + self.compile_install() + except SystemExit: + sys.exit(EXIT_COMPILE_ERROR) self.create_venv() From 35166b7ee69797ccf2011b46cb3bb2e3d532a4d5 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 10:44:01 -0700 Subject: [PATCH 106/126] Add BenchmarkManifest.show(). --- pyperformance/_manifest.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index 84b4465a..cf46b6fa 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -226,6 +226,26 @@ def resolve_group(self, name, *, fail=True): raise KeyError(name) yield from benchmarks or () + def show(self, *, raw=True, resolved=True): + yield self.filename + yield 'groups:' + if raw: + yield f' {self._raw_groups}' + if resolved: + yield f' {self.groups}' + yield 'default:' + if resolved: + for i, bench in enumerate(self.resolve_group('default')): + yield f' {i:>2} {bench}' + if raw: + yield 'benchmarks (raw):' + for i, bench in enumerate(self._raw_benchmarks): + yield f' {i:>2} {bench}' + if resolved: + yield 'benchmarks:' + for i, bench in enumerate(self.benchmarks): + yield f' {i:>2} {bench}' + ####################################### # internal implementation From 561d27195b6595e6426907ec19e8f82f28083f96 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 10:45:09 -0700 Subject: [PATCH 107/126] Use --manifest and --benchmarks when creating venv for "compile". 
--- pyperformance/compile.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index d8105d69..88fa36b7 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -508,6 +508,10 @@ def create_venv(self): cmd.extend(('--venv', self.conf.venv)) if self.options.inherit_environ: cmd.append('--inherit-environ=%s' % ','.join(self.options.inherit_environ)) + if self.conf.manifest: + cmd.extend(('--manifest', self.conf.manifest)) + if self.conf.benchmarks: + cmd.extend(('--benchmarks', self.conf.benchmarks)) exitcode = self.run_nocheck(*cmd) if exitcode: sys.exit(EXIT_VENV_ERROR) From 683150889f51b297d958a6fccef1c55f620f0b15 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 10:45:46 -0700 Subject: [PATCH 108/126] Add the resolve_file() util. --- pyperformance/_utils.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pyperformance/_utils.py b/pyperformance/_utils.py index 3518d34b..53d49591 100644 --- a/pyperformance/_utils.py +++ b/pyperformance/_utils.py @@ -53,6 +53,19 @@ def check_dir(dirname): raise ValueError(f'directory missing ({dirname})') +def resolve_file(filename, relroot=None): + resolved = os.path.normpath(filename) + resolved = os.path.expanduser(resolved) + #resolved = os.path.expandvars(filename) + if not os.path.isabs(resolved): + if not relroot: + relroot = os.getcwd() + elif not os.path.isabs(relroot): + raise NotImplementedError(relroot) + resolved = os.path.join(relroot, resolved) + return resolved + + ####################################### # platform utils From bb8834142ebb8f9b73d1928800eb63f223ff76b2 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 10:46:23 -0700 Subject: [PATCH 109/126] Resolve the manifest file in includes. --- pyperformance/_manifest.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyperformance/_manifest.py b/pyperformance/_manifest.py index cf46b6fa..831472f4 100644 --- a/pyperformance/_manifest.py +++ b/pyperformance/_manifest.py @@ -275,18 +275,22 @@ def _iter_sections(lines): def _parse_manifest_file(filename): - filename = os.path.abspath(filename) + relroot = os.path.dirname(filename) + filename = _utils.resolve_file(filename, relroot) with open(filename) as infile: yield from _parse_manifest(infile, filename) def _parse_manifest(lines, filename): + relroot = os.path.dirname(filename) for section, seclines in _iter_sections(lines): if section == 'includes': yield filename, section, list(seclines) for line in seclines: if line == '': line = DEFAULT_MANIFEST + else: + line = _utils.resolve_file(line, relroot) yield from _parse_manifest_file(line) elif section == 'benchmarks': yield filename, section, list(_parse_benchmarks_section(seclines)) From 1e51f8eb2cffd1661e47748365d9fa330568bc2a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 10:51:29 -0700 Subject: [PATCH 110/126] Default BenchmarkRevision._dryrun to False. 
--- pyperformance/compile.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index 88fa36b7..aa7cecbe 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -412,8 +412,7 @@ def compile_install(self): class BenchmarkRevision(Application): - #_dryrun = False - _dryrun = True + _dryrun = False def __init__(self, conf, revision, branch=None, patch=None, setup_log=True, filename=None, commit_date=None, From ef231fec6ff6c516988f6370145d8e39ab64967f Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 10:56:56 -0700 Subject: [PATCH 111/126] Set the default for --benchmarks manually. --- pyperformance/cli.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 9a8d037d..4bc3c026 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -16,7 +16,7 @@ def comma_separated(values): def filter_opts(cmd): cmd.add_argument("--manifest", help="benchmark manifest file to use") - cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default='', + cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", help=("Comma-separated list of benchmarks to run. Can" " contain both positive and negative arguments:" " --benchmarks=run_this,also_this,-not_this. If" @@ -188,6 +188,10 @@ def parse_args(): sys.exit(1) options.python = abs_python + if hasattr(options, 'benchmarks'): + if not options.benchmarks: + options.benchmarks = '' + return (parser, options) From 648bd180c9e02c908271a77e9e9b03904743e4e6 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 11:11:15 -0700 Subject: [PATCH 112/126] Allow the "venv" command to not install benchmark requirements. --- pyperformance/cli.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 4bc3c026..6317328a 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -13,7 +13,7 @@ def comma_separated(values): return list(filter(None, values)) -def filter_opts(cmd): +def filter_opts(cmd, *, benchrequired=True): cmd.add_argument("--manifest", help="benchmark manifest file to use") cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", @@ -23,6 +23,11 @@ def filter_opts(cmd): " there are no positive arguments, we'll run all" " benchmarks except the negative arguments. " " Otherwise we run only the positive arguments.")) + cmd.set_defaults(use_benchmarks_default=True) + if not benchrequired: + cmd.add_argument('--no-benchmarks-default', + dest='use_benchmarks_default', + action='store_false') def parse_args(): @@ -142,10 +147,10 @@ def parse_args(): cmd = venvsubs.add_parser('show') cmds.append(cmd) cmd = venvsubs.add_parser('create') - filter_opts(cmd) + filter_opts(cmd, benchrequired=False) cmds.append(cmd) cmd = venvsubs.add_parser('recreate') - filter_opts(cmd) + filter_opts(cmd, benchrequired=False) cmds.append(cmd) cmd = venvsubs.add_parser('remove') cmds.append(cmd) @@ -189,7 +194,7 @@ def parse_args(): options.python = abs_python if hasattr(options, 'benchmarks'): - if not options.benchmarks: + if not options.benchmarks and options.use_benchmarks_default: options.benchmarks = '' return (parser, options) From 7bb8d943fe6c427128fc30b9f68d4b8700f69994 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 11:16:36 -0700 Subject: [PATCH 113/126] Use as a marker for "no benchmarks". 
--- pyperformance/cli.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 6317328a..3daa2f69 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -13,21 +13,16 @@ def comma_separated(values): return list(filter(None, values)) -def filter_opts(cmd, *, benchrequired=True): +def filter_opts(cmd): cmd.add_argument("--manifest", help="benchmark manifest file to use") - cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", + cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default='', help=("Comma-separated list of benchmarks to run. Can" " contain both positive and negative arguments:" " --benchmarks=run_this,also_this,-not_this. If" " there are no positive arguments, we'll run all" " benchmarks except the negative arguments. " " Otherwise we run only the positive arguments.")) - cmd.set_defaults(use_benchmarks_default=True) - if not benchrequired: - cmd.add_argument('--no-benchmarks-default', - dest='use_benchmarks_default', - action='store_false') def parse_args(): @@ -147,10 +142,10 @@ def parse_args(): cmd = venvsubs.add_parser('show') cmds.append(cmd) cmd = venvsubs.add_parser('create') - filter_opts(cmd, benchrequired=False) + filter_opts(cmd) cmds.append(cmd) cmd = venvsubs.add_parser('recreate') - filter_opts(cmd, benchrequired=False) + filter_opts(cmd) cmds.append(cmd) cmd = venvsubs.add_parser('remove') cmds.append(cmd) @@ -194,8 +189,8 @@ def parse_args(): options.python = abs_python if hasattr(options, 'benchmarks'): - if not options.benchmarks and options.use_benchmarks_default: - options.benchmarks = '' + if options.benchmarks == '': + options.benchmarks = None return (parser, options) From 15fd5599b6d555066368a439cb431b3583196f3b Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 11:23:10 -0700 Subject: [PATCH 114/126] Require --benchmarks (or default) for some commands. --- pyperformance/cli.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 3daa2f69..3680c886 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -13,7 +13,7 @@ def comma_separated(values): return list(filter(None, values)) -def filter_opts(cmd): +def filter_opts(cmd, *, allow_no_benchmarks=False): cmd.add_argument("--manifest", help="benchmark manifest file to use") cmd.add_argument("-b", "--benchmarks", metavar="BM_LIST", default='', @@ -23,6 +23,7 @@ def filter_opts(cmd): " there are no positive arguments, we'll run all" " benchmarks except the negative arguments. " " Otherwise we run only the positive arguments.")) + cmd.set_defaults(allow_no_benchmarks=allow_no_benchmarks) def parse_args(): @@ -142,10 +143,10 @@ def parse_args(): cmd = venvsubs.add_parser('show') cmds.append(cmd) cmd = venvsubs.add_parser('create') - filter_opts(cmd) + filter_opts(cmd, allow_no_benchmarks=True) cmds.append(cmd) cmd = venvsubs.add_parser('recreate') - filter_opts(cmd) + filter_opts(cmd, allow_no_benchmarks=True) cmds.append(cmd) cmd = venvsubs.add_parser('remove') cmds.append(cmd) @@ -190,6 +191,8 @@ def parse_args(): if hasattr(options, 'benchmarks'): if options.benchmarks == '': + if not options.allow_no_benchmarks: + parser.error('--benchmarks cannot be empty') options.benchmarks = None return (parser, options) From f04150352fae164cc22eae41b3aca7bc655ea71f Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 12:46:24 -0700 Subject: [PATCH 115/126] Do not always install the first benchmark venv. 
--- pyperformance/run.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pyperformance/run.py b/pyperformance/run.py index f23ad288..97cda616 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -60,6 +60,12 @@ def run_benchmarks(should_run, python, options): benchmarks = {} venvs = set() + if options.venv: + venvs.add(_venv.VirtualEnvironment( + options.python, + options.venv, + inherit_environ=options.inherit_environ, + )) for bench in to_run: bench_runid = runid._replace(bench=bench) venv = _venv.VirtualEnvironment( From 452b5416cdf41c11c9be8f84236606dd2a7f8041 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 14:19:31 -0700 Subject: [PATCH 116/126] Separate creating venv from installing requirements. --- pyperformance/run.py | 5 +- pyperformance/venv.py | 164 ++++++++++++++++++++++++++---------------- 2 files changed, 104 insertions(+), 65 deletions(-) diff --git a/pyperformance/run.py b/pyperformance/run.py index 97cda616..98c93bef 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -68,15 +68,16 @@ def run_benchmarks(should_run, python, options): )) for bench in to_run: bench_runid = runid._replace(bench=bench) + assert bench_runid.name, (bench, bench_runid) venv = _venv.VirtualEnvironment( options.python, options.venv, inherit_environ=options.inherit_environ, - bench=bench, name=bench_runid.name, usebase=True, ) - venv.create(refresh=venv.get_path() not in venvs) + venv.ensure(refresh=venv.get_path() not in venvs) + venv.install_reqs(bench) venvs.add(venv.get_path()) benchmarks[bench] = (venv, bench_runid) diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 2b5931f9..46afebc0 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -68,6 +68,9 @@ def __init__(self): # optional requirements self._optional = set() + def __len__(self): + return len(self.specs) + def iter_non_optional(self): for spec in self.specs: if spec in self._optional: @@ -172,9 +175,7 @@ class VirtualEnvironment(object): def __init__(self, python, root=None, *, inherit_environ=None, - bench=None, name=None, - requirements=None, usebase=False, ): if usebase: @@ -182,18 +183,17 @@ def __init__(self, python, root=None, *, self.python = python self.inherit_environ = inherit_environ or None - self.bench = bench or None self._name = name or None self._venv_path = root or None self._pip_program = None self._force_old_pip = False - self.requirements = requirements + self._prepared = False @property def name(self): if self._name is None: from .run import get_run_id - runid = get_run_id(self.python, self.bench) + runid = get_run_id(self.python) self._name = runid.name return self._name @@ -401,51 +401,32 @@ def exists(self): venv_python = self.get_python_program() return os.path.exists(venv_python) - def _install_reqs(self): + def prepare(self, install=True): + venv_path = self.get_path() + print("Installing the virtual environment %s" % venv_path) + if self._prepared or (self._prepared is None and not install): + print('(already installed)') + return pip_program = self.get_pip_program() - # parse requirements - basereqs = Requirements.from_file(REQUIREMENTS_FILE, ['psutil']) - if self.requirements: - requirements = self.requirements - elif self.bench: - requirements = Requirements.from_benchmarks([self.bench]) - else: - requirements = basereqs - # Every benchmark must depend on pyperf. 
- if not requirements.get('pyperf'): - pyperf_req = basereqs.get('pyperf') - if not pyperf_req: - raise NotImplementedError - requirements.specs.append(pyperf_req) - - # Upgrade pip - cmd = pip_program + ['install', '-U'] - if self._force_old_pip: - cmd.extend((REQ_OLD_PIP, REQ_OLD_SETUPTOOLS)) - else: - cmd.extend(basereqs.pip) - self.run_cmd(cmd) - - # Upgrade installer dependencies (setuptools, ...) - cmd = pip_program + ['install', '-U'] - cmd.extend(basereqs.installer) - self.run_cmd(cmd) + if not self._prepared: + # parse requirements + basereqs = Requirements.from_file(REQUIREMENTS_FILE, ['psutil']) - # install requirements - cmd = pip_program + ['install'] - cmd.extend(requirements.iter_non_optional()) - self.run_cmd(cmd) + # Upgrade pip + cmd = pip_program + ['install', '-U'] + if self._force_old_pip: + cmd.extend((REQ_OLD_PIP, REQ_OLD_SETUPTOOLS)) + else: + cmd.extend(basereqs.pip) + self.run_cmd(cmd) - # install optional requirements - for req in requirements.iter_optional(): - cmd = pip_program + ['install', '-U', req] - exitcode = self.run_cmd_nocheck(cmd) - if exitcode: - print("WARNING: failed to install %s" % req) - print() + # Upgrade installer dependencies (setuptools, ...) + cmd = pip_program + ['install', '-U'] + cmd.extend(basereqs.installer) + self.run_cmd(cmd) - if not self.bench: + if install: # install pyperformance inside the virtual environment if is_build_dir(): root_dir = os.path.dirname(PERFORMANCE_ROOT) @@ -454,6 +435,9 @@ def _install_reqs(self): version = pyperformance.__version__ cmd = pip_program + ['install', 'pyperformance==%s' % version] self.run_cmd(cmd) + self._prepared = True + else: + self._prepared = None # Display the pip version cmd = pip_program + ['--version'] @@ -463,23 +447,73 @@ def _install_reqs(self): cmd = pip_program + ['freeze'] self.run_cmd(cmd) - def create(self, refresh=True): + def create(self, install=True): venv_path = self.get_path() - if self.exists(): - if refresh: - print("Installing the virtual environment %s" % venv_path) - self._install_reqs() - return - print("Creating the virtual environment %s" % venv_path) + if self.exists(): + raise Exception(f'virtual environment {venv_path} already exists') try: self._create_venv() - self._install_reqs() + self.prepare(install) except: # noqa print() safe_rmtree(venv_path) raise + def ensure(self, refresh=True, install=True): + venv_path = self.get_path() + if self.exists(): + if refresh: + self.prepare(install) + else: + self.create(install) + + def install_reqs(self, requirements=None): + venv_path = self.get_path() + print("Installing requirements into the virtual environment %s" % venv_path) + + # parse requirements + bench = None + if requirements is None: + requirements = Requirements() + elif hasattr(requirements, 'requirements_lockfile'): + bench = requirements + requirements = Requirements.from_benchmarks([bench]) + + # Every benchmark must depend on pyperf. 
+ if requirements and bench is not None: + if not requirements.get('pyperf'): + basereqs = Requirements.from_file(REQUIREMENTS_FILE, ['psutil']) + pyperf_req = basereqs.get('pyperf') + if not pyperf_req: + raise NotImplementedError + requirements.specs.append(pyperf_req) + + pip_program = self.get_pip_program() + if not requirements: + print('(nothing to install)') + else: + self.prepare(install=bench is None) + + # install requirements + cmd = pip_program + ['install'] + cmd.extend(requirements.iter_non_optional()) + self.run_cmd(cmd) + + # install optional requirements + for req in requirements.iter_optional(): + cmd = pip_program + ['install', '-U', req] + exitcode = self.run_cmd_nocheck(cmd) + if exitcode: + print("WARNING: failed to install %s" % req) + print() + + # Dump the package list and their versions: pip freeze + cmd = pip_program + ['freeze'] + self.run_cmd(cmd) + + return requirements + def exec_in_virtualenv(options): venv = VirtualEnvironment( @@ -488,7 +522,7 @@ def exec_in_virtualenv(options): inherit_environ=options.inherit_environ, ) - venv.create() + venv.ensure() venv_python = venv.get_python_program() args = [venv_python, "-m", "pyperformance"] + \ @@ -513,32 +547,36 @@ def cmd_venv(options, benchmarks=None): options.python, options.venv, inherit_environ=options.inherit_environ, - requirements=requirements, ) venv_path = venv.get_path() + exists = venv.exists() if action == 'create': - if not venv.exists(): - venv.create() - print("The virtual environment %s has been created" % venv_path) - else: + if exists: print("The virtual environment %s already exists" % venv_path) - venv.create() + venv.ensure() + venv.install_reqs(requirements) + if not exists: + print("The virtual environment %s has been created" % venv_path) + elif action == 'recreate': - if venv.exists(): + if exists: if venv.get_python_program() == sys.executable: print("The virtual environment %s already exists" % venv_path) print("(it matches the currently running Python executable)") - venv.create() + venv.ensure() + venv.install_reqs(requirements) else: print("The virtual environment %s already exists" % venv_path) shutil.rmtree(venv_path) print("The old virtual environment %s has been removed" % venv_path) print() - venv.create() + venv.ensure() + venv.install_reqs(requirements) print("The virtual environment %s has been recreated" % venv_path) else: venv.create() + venv.install_reqs(requirements) print("The virtual environment %s has been created" % venv_path) elif action == 'remove': From b049a4a8d859880d63e3e293fdbf56fd4146336b Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 14:37:20 -0700 Subject: [PATCH 117/126] Only install per-benchmark requirements when running them. 
--- pyperformance/compile.py | 3 ++- pyperformance/run.py | 11 ++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index aa7cecbe..a50473d8 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -502,7 +502,8 @@ def create_venv(self): python = self.python.program if self._dryrun: python = sys.executable - cmd = [python, '-u', '-m', 'pyperformance', 'venv', 'recreate'] + cmd = [python, '-u', '-m', 'pyperformance', 'venv', 'recreate', + '--benchmarks', ''] if self.conf.venv: cmd.extend(('--venv', self.conf.venv)) if self.options.inherit_environ: diff --git a/pyperformance/run.py b/pyperformance/run.py index 98c93bef..a4b1410c 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -61,11 +61,13 @@ def run_benchmarks(should_run, python, options): benchmarks = {} venvs = set() if options.venv: - venvs.add(_venv.VirtualEnvironment( + venv = _venv.VirtualEnvironment( options.python, options.venv, inherit_environ=options.inherit_environ, - )) + ) + venv.ensure(refresh=False) + venvs.add(venv.get_path()) for bench in to_run: bench_runid = runid._replace(bench=bench) assert bench_runid.name, (bench, bench_runid) @@ -76,7 +78,10 @@ def run_benchmarks(should_run, python, options): name=bench_runid.name, usebase=True, ) - venv.ensure(refresh=venv.get_path() not in venvs) + print(f'creating venv for benchmark ({bench.name})') + alreadyseen = venv.get_path() in venvs + venv.ensure(refresh=not alreadyseen) + # XXX Do not override when there is a requirements collision. venv.install_reqs(bench) venvs.add(venv.get_path()) benchmarks[bench] = (venv, bench_runid) From a083cd5aac2ad9c8ee9d998f27901639106c7ddb Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 15:41:34 -0700 Subject: [PATCH 118/126] Print the benchmark number. --- pyperformance/run.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyperformance/run.py b/pyperformance/run.py index a4b1410c..c70eeb9a 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -68,7 +68,7 @@ def run_benchmarks(should_run, python, options): ) venv.ensure(refresh=False) venvs.add(venv.get_path()) - for bench in to_run: + for i, bench in enumerate(to_run): bench_runid = runid._replace(bench=bench) assert bench_runid.name, (bench, bench_runid) venv = _venv.VirtualEnvironment( @@ -78,7 +78,7 @@ def run_benchmarks(should_run, python, options): name=bench_runid.name, usebase=True, ) - print(f'creating venv for benchmark ({bench.name})') + print(f'({i+1:>2}/{len(to_run)}) creating venv for benchmark ({bench.name})') alreadyseen = venv.get_path() in venvs venv.ensure(refresh=not alreadyseen) # XXX Do not override when there is a requirements collision. From 7b1875357fb62b4fbcfd8362cf491520250787e4 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 17:01:42 -0700 Subject: [PATCH 119/126] Factor out Python.resolve_program(). 
--- pyperformance/compile.py | 48 ++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index a50473d8..ebcfd932 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -247,6 +247,25 @@ def __init__(self, app, conf): self.program = None self.hexversion = None + def resolve_program(self): + if sys.platform in ('darwin', 'win32'): + program_ext = '.exe' + else: + program_ext = '' + + if self.conf.install: + prefix = self.conf.prefix + + if sys.platform == 'darwin': + program_ext = '' + + program = os.path.join(prefix, "bin", "python3" + program_ext) + if not os.path.exists(program): + program = os.path.join(prefix, "bin", "python" + program_ext) + else: + program = os.path.join(self.conf.build_dir, "python" + program_ext) + return program + def patch(self, filename): if not filename: return @@ -288,28 +307,13 @@ def compile(self): self.run('make') def install_python(self): - if sys.platform in ('darwin', 'win32'): - program_ext = '.exe' - else: - program_ext = '' - + program = self.resolve_program() if self.conf.install: - prefix = self.conf.prefix - self.app.safe_rmdir(prefix) - self.app.safe_makedirs(prefix) - + self.app.safe_rmdir(self.conf.prefix) + self.app.safe_makedirs(self.conf.prefix) self.run('make', 'install') - - if sys.platform == 'darwin': - program_ext = '' - - self.program = os.path.join(prefix, "bin", "python" + program_ext) - if not os.path.exists(self.program): - self.program = os.path.join(prefix, "bin", "python3" + program_ext) - else: - # don't install: run python from the compilation directory - self.program = os.path.join(self.conf.build_dir, - "python" + program_ext) + # else don't install: run python from the compilation directory + self.program = program def get_version(self): # Dump the Python version @@ -501,7 +505,9 @@ def create_venv(self): # Create venv python = self.python.program if self._dryrun: - python = sys.executable + python = self.python.resolve_program() + if not os.path.exists(python): + python = sys.executable cmd = [python, '-u', '-m', 'pyperformance', 'venv', 'recreate', '--benchmarks', ''] if self.conf.venv: From c8a77891b07d68638a904310e2748f8382d3bcbe Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 16 Nov 2021 17:37:35 -0700 Subject: [PATCH 120/126] Do not pass --benchmarks when creating venv for "compile". --- pyperformance/compile.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index ebcfd932..c8616e38 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -514,10 +514,6 @@ def create_venv(self): cmd.extend(('--venv', self.conf.venv)) if self.options.inherit_environ: cmd.append('--inherit-environ=%s' % ','.join(self.options.inherit_environ)) - if self.conf.manifest: - cmd.extend(('--manifest', self.conf.manifest)) - if self.conf.benchmarks: - cmd.extend(('--benchmarks', self.conf.benchmarks)) exitcode = self.run_nocheck(*cmd) if exitcode: sys.exit(EXIT_VENV_ERROR) From 6f6df4d66cc8d4730dc69f525220467332798449 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 17 Nov 2021 09:22:34 -0700 Subject: [PATCH 121/126] Skip a benchmark if its requirements could not be installed. 
--- pyperformance/run.py | 15 +++++++++++---- pyperformance/venv.py | 23 ++++++++++++++++------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/pyperformance/run.py b/pyperformance/run.py index c70eeb9a..f543013a 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -79,11 +79,16 @@ def run_benchmarks(should_run, python, options): usebase=True, ) print(f'({i+1:>2}/{len(to_run)}) creating venv for benchmark ({bench.name})') - alreadyseen = venv.get_path() in venvs + venv_path = venv.get_path() + alreadyseen = venv_path in venvs venv.ensure(refresh=not alreadyseen) - # XXX Do not override when there is a requirements collision. - venv.install_reqs(bench) - venvs.add(venv.get_path()) + try: + # XXX Do not override when there is a requirements collision. + venv.install_reqs(bench) + except _venv.RequirementsInstallationFailedError: + print('(benchmark will be skipped)') + venv = None + venvs.add(venv_path) benchmarks[bench] = (venv, bench_runid) suite = None @@ -118,6 +123,8 @@ def add_bench(dest_suite, obj): bench_venv, bench_runid = benchmarks.get(bench) try: + if bench_venv is None: + raise Exception('could not install requirements') result = bench.run( python, bench_runid, diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 46afebc0..d65031e5 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -27,6 +27,10 @@ def is_build_dir(): return os.path.exists(os.path.join(root_dir, 'setup.py')) +class RequirementsInstallationFailedError(Exception): + pass + + class Requirements(object): @classmethod @@ -468,7 +472,7 @@ def ensure(self, refresh=True, install=True): else: self.create(install) - def install_reqs(self, requirements=None): + def install_reqs(self, requirements=None, *, exitonerror=False): venv_path = self.get_path() print("Installing requirements into the virtual environment %s" % venv_path) @@ -497,8 +501,13 @@ def install_reqs(self, requirements=None): # install requirements cmd = pip_program + ['install'] - cmd.extend(requirements.iter_non_optional()) - self.run_cmd(cmd) + reqs = list(requirements.iter_non_optional()) + cmd.extend(reqs) + exitcode = self.run_cmd_nocheck(cmd) + if exitcode: + if exitonerror: + sys.exit(exitcode) + raise RequirementsInstallationFailedError(reqs) # install optional requirements for req in requirements.iter_optional(): @@ -555,7 +564,7 @@ def cmd_venv(options, benchmarks=None): if exists: print("The virtual environment %s already exists" % venv_path) venv.ensure() - venv.install_reqs(requirements) + venv.install_reqs(requirements, exitonerror=True) if not exists: print("The virtual environment %s has been created" % venv_path) @@ -565,18 +574,18 @@ def cmd_venv(options, benchmarks=None): print("The virtual environment %s already exists" % venv_path) print("(it matches the currently running Python executable)") venv.ensure() - venv.install_reqs(requirements) + venv.install_reqs(requirements, exitonerror=True) else: print("The virtual environment %s already exists" % venv_path) shutil.rmtree(venv_path) print("The old virtual environment %s has been removed" % venv_path) print() venv.ensure() - venv.install_reqs(requirements) + venv.install_reqs(requirements, exitonerror=True) print("The virtual environment %s has been recreated" % venv_path) else: venv.create() - venv.install_reqs(requirements) + venv.install_reqs(requirements, exitonerror=True) print("The virtual environment %s has been created" % venv_path) elif action == 'remove': From b379536e99bf722bb8680d1c3e5f3b405de187f4 Mon Sep 17 00:00:00 2001 
From: Eric Snow Date: Wed, 17 Nov 2021 09:24:12 -0700 Subject: [PATCH 122/126] Set Python.program to None if the resolved path does not exist. --- pyperformance/compile.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index c8616e38..d5084ddc 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -264,6 +264,8 @@ def resolve_program(self): program = os.path.join(prefix, "bin", "python" + program_ext) else: program = os.path.join(self.conf.build_dir, "python" + program_ext) + if not os.path.exists(program): + program = None return program def patch(self, filename): From 686a96daaa0ac4b09f37d1bc2b53ab10e67da9a5 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 17 Nov 2021 10:16:04 -0700 Subject: [PATCH 123/126] Factor out resolve_python(). --- pyperformance/compile.py | 59 ++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index d5084ddc..015cf8ca 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -237,6 +237,29 @@ def safe_makedirs(self, directory): raise +def resolve_python(prefix, builddir, *, fallback=True): + if sys.platform in ('darwin', 'win32'): + program_ext = '.exe' + else: + program_ext = '' + + if prefix: + if sys.platform == 'darwin': + program_ext = '' + program = os.path.join(prefix, "bin", "python3" + program_ext) + exists = os.path.exists(program) + if not exists and fallback: + program2 = os.path.join(prefix, "bin", "python" + program_ext) + if os.path.exists(program2): + program = program2 + exists = True + else: + assert builddir + program = os.path.join(builddir, "python" + program_ext) + exists = os.path.exists(program) + return program, exists + + class Python(Task): def __init__(self, app, conf): super().__init__(app, conf.build_dir) @@ -247,27 +270,6 @@ def __init__(self, app, conf): self.program = None self.hexversion = None - def resolve_program(self): - if sys.platform in ('darwin', 'win32'): - program_ext = '.exe' - else: - program_ext = '' - - if self.conf.install: - prefix = self.conf.prefix - - if sys.platform == 'darwin': - program_ext = '' - - program = os.path.join(prefix, "bin", "python3" + program_ext) - if not os.path.exists(program): - program = os.path.join(prefix, "bin", "python" + program_ext) - else: - program = os.path.join(self.conf.build_dir, "python" + program_ext) - if not os.path.exists(program): - program = None - return program - def patch(self, filename): if not filename: return @@ -309,11 +311,17 @@ def compile(self): self.run('make') def install_python(self): - program = self.resolve_program() + program, _ = resolve_python( + self.conf.prefix if self.conf.install else None, + self.conf.build_dir, + ) if self.conf.install: + program, _ = resolve_python(self.conf.prefix, self.conf.build_dir) self.app.safe_rmdir(self.conf.prefix) self.app.safe_makedirs(self.conf.prefix) self.run('make', 'install') + else: + program, _ = resolve_python(None, self.conf.build_dir) # else don't install: run python from the compilation directory self.program = program @@ -507,8 +515,11 @@ def create_venv(self): # Create venv python = self.python.program if self._dryrun: - python = self.python.resolve_program() - if not os.path.exists(python): + program, exists = resolve_python( + self.conf.prefix if self.conf.installed else None, + self.conf.build_dir, + ) + if not exists: python = sys.executable cmd = [python, '-u', '-m', 'pyperformance', 'venv', 'recreate', 
'--benchmarks', ''] From 664a9099a0f0bfd0429299d76f57473ceccd0508 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 17 Nov 2021 10:21:31 -0700 Subject: [PATCH 124/126] Add a blank line. --- pyperformance/run.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pyperformance/run.py b/pyperformance/run.py index f543013a..5268bd78 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -87,6 +87,7 @@ def run_benchmarks(should_run, python, options): venv.install_reqs(bench) except _venv.RequirementsInstallationFailedError: print('(benchmark will be skipped)') + print() venv = None venvs.add(venv_path) benchmarks[bench] = (venv, bench_runid) From 9899813fd4eac032c452eb4fd2c3563defeec0f1 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 17 Nov 2021 10:29:18 -0700 Subject: [PATCH 125/126] Do not print a traceback for skipped benchmarks. --- pyperformance/run.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pyperformance/run.py b/pyperformance/run.py index 5268bd78..8e196547 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -123,9 +123,11 @@ def add_bench(dest_suite, obj): return dest_suite bench_venv, bench_runid = benchmarks.get(bench) + if bench_venv is None: + print("ERROR: Benchmark %s failed: could not install requirements" % name) + errors.append(name) + continue try: - if bench_venv is None: - raise Exception('could not install requirements') result = bench.run( python, bench_runid, From da1b6c341487c91ff06a47b114cbc306e7da7344 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 17 Nov 2021 10:48:51 -0700 Subject: [PATCH 126/126] Fix a typo. --- pyperformance/compile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyperformance/compile.py b/pyperformance/compile.py index 015cf8ca..2e3b60dd 100644 --- a/pyperformance/compile.py +++ b/pyperformance/compile.py @@ -516,10 +516,10 @@ def create_venv(self): python = self.python.program if self._dryrun: program, exists = resolve_python( - self.conf.prefix if self.conf.installed else None, + self.conf.prefix if self.conf.install else None, self.conf.build_dir, ) - if not exists: + if not python or not exists: python = sys.executable cmd = [python, '-u', '-m', 'pyperformance', 'venv', 'recreate', '--benchmarks', '']