From c1b7593207fe4dc65e77b28139e4693ad7db1b83 Mon Sep 17 00:00:00 2001
From: Michael Droettboom
Date: Tue, 10 May 2022 11:41:57 -0400
Subject: [PATCH 1/2] Support building C extensions in benchmarks

Add ctypes and ctypes_argtypes benchmarks
---
 .gitignore                                    |  2 +-
 MANIFEST.in                                   |  1 +
 doc/benchmarks.rst                            | 10 ++
 doc/custom_benchmarks.rst                     | 24 +++-
 pyperformance/_benchmark.py                   |  7 ++
 pyperformance/_benchmark_metadata.py          |  4 +
 pyperformance/data-files/benchmarks/MANIFEST  |  3 +
 .../bm_ctypes/bm_ctypes_argtypes.toml         | 11 +++
 .../data-files/benchmarks/bm_ctypes/cmodule.c | 54 +++++++++++
 .../benchmarks/bm_ctypes/pyproject.toml       | 11 +++
 .../benchmarks/bm_ctypes/requirements.txt     |  1 +
 .../benchmarks/bm_ctypes/run_benchmark.py     | 95 +++++++++++++++++++
 .../data-files/benchmarks/bm_ctypes/setup.py  | 11 +++
 .../benchmarks/bm_ctypes/src/__init__.py      |  0
 pyperformance/venv.py                         |  4 +
 15 files changed, 227 insertions(+), 11 deletions(-)
 create mode 100644 pyperformance/data-files/benchmarks/bm_ctypes/bm_ctypes_argtypes.toml
 create mode 100644 pyperformance/data-files/benchmarks/bm_ctypes/cmodule.c
 create mode 100644 pyperformance/data-files/benchmarks/bm_ctypes/pyproject.toml
 create mode 100644 pyperformance/data-files/benchmarks/bm_ctypes/requirements.txt
 create mode 100644 pyperformance/data-files/benchmarks/bm_ctypes/run_benchmark.py
 create mode 100644 pyperformance/data-files/benchmarks/bm_ctypes/setup.py
 create mode 100644 pyperformance/data-files/benchmarks/bm_ctypes/src/__init__.py

diff --git a/.gitignore b/.gitignore
index ee5e45bf..ed358651 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,7 +10,7 @@
 # Created by setup.py sdist
 build/
 dist/
-pyperformance.egg-info/
+*.egg-info/
 
 # Created by the pyperformance script
 venv/
diff --git a/MANIFEST.in b/MANIFEST.in
index 9f89da35..8cf042db 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -15,5 +15,6 @@ include pyperformance/data-files/benchmarks/MANIFEST
 include pyperformance/data-files/benchmarks/bm_*/*.toml
 include pyperformance/data-files/benchmarks/bm_*/*.py
 include pyperformance/data-files/benchmarks/bm_*/requirements.txt
+include pyperformance/data-files/benchmarks/bm_*/*.c
 recursive-include pyperformance/data-files/benchmarks/bm_*/data *
 recursive-exclude pyperformance/tests *
diff --git a/doc/benchmarks.rst b/doc/benchmarks.rst
index a5fce2d8..d81ebaca 100644
--- a/doc/benchmarks.rst
+++ b/doc/benchmarks.rst
@@ -130,6 +130,16 @@ deepcopy
 Benchmark the Python `copy.deepcopy` method. The `deepcopy` method is
 performed on a nested dictionary and a dataclass.
 
+ctypes
+------
+
+Benchmark the overhead of calling C functions using ``ctypes``.
+
+The ``ctypes`` benchmark lets ``ctypes`` infer the argument types from the passed-in
+values. The ``ctypes_argtypes`` benchmark `explicitly specifies the argument types
+<https://docs.python.org/3/library/ctypes.html#specifying-the-required-argument-types-function-prototypes>`_,
+which is slower than letting ``ctypes`` infer them.
+
 deltablue
 ---------
 
diff --git a/doc/custom_benchmarks.rst b/doc/custom_benchmarks.rst
index 01028f70..c62fa82b 100644
--- a/doc/custom_benchmarks.rst
+++ b/doc/custom_benchmarks.rst
@@ -324,16 +324,17 @@ All other PEP 621 fields are optional (e.g.
 ``requires-python = ">=3.8"``,
 
 The ``[tool.pyperformance]`` Section
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-=============== ===== === === ===
-field           type  R   B   F
-=============== ===== === === ===
-tool.name       str   X   X
-tool.tags       [str]     X
-tool.extra_opts [str]     X
-tool.inherits   file
-tool.runscript  file      X
-tool.datadir    file      X
-=============== ===== === === ===
+================== ===== === === ===
+field              type  R   B   F
+================== ===== === === ===
+tool.name          str   X   X
+tool.tags          [str]     X
+tool.extra_opts    [str]     X
+tool.inherits      file
+tool.runscript     file      X
+tool.datadir       file      X
+tool.install_setup bool
+================== ===== === === ===
 
 "R": required
 "B": inferred from the inherited metadata
@@ -342,3 +343,6 @@ tool.datadir       file      X
 * tags: optional list of names to group benchmarks
 * extra_opts: optional list of args to pass to ``tool.runscript``
 * runscript: the benchmark script to use instead of run_benchmark.py.
+* install_setup: when ``true``, run ``pip install`` on the benchmark
+  directory to install it into the virtual environment. This has the
+  effect of running its ``setup.py`` file, if present.
diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py
index fc0df2b5..6546df85 100644
--- a/pyperformance/_benchmark.py
+++ b/pyperformance/_benchmark.py
@@ -164,6 +164,13 @@ def runscript(self):
     def extra_opts(self):
         return self._get_metadata_value('extra_opts', ())
 
+    @property
+    def setup_py(self):
+        if not self._get_metadata_value('install_setup', False):
+            return None
+        filename = os.path.join(os.path.dirname(self.metafile), 'setup.py')
+        return filename if os.path.exists(filename) else None
+
     # Other metadata keys:
     # * base
     # * python
diff --git a/pyperformance/_benchmark_metadata.py b/pyperformance/_benchmark_metadata.py
index 94273f67..ed59e5b9 100644
--- a/pyperformance/_benchmark_metadata.py
+++ b/pyperformance/_benchmark_metadata.py
@@ -32,6 +32,7 @@
     'datadir': None,
     'runscript': None,
     'extra_opts': None,
+    'install_setup': None,
 }
 
 
@@ -228,6 +229,9 @@ def _resolve_value(field, value, rootdir):
         for opt in value:
             if not opt or not isinstance(opt, str):
                 raise TypeError(f'extra_opts should be a list of strings, got {value!r}')
+    elif field == 'install_setup':
+        if not isinstance(value, bool):
+            raise TypeError(f'install_setup should be a bool, got {value!r}')
     else:
         raise NotImplementedError(field)
     return value
diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST
index 43489311..fa221d6c 100644
--- a/pyperformance/data-files/benchmarks/MANIFEST
+++ b/pyperformance/data-files/benchmarks/MANIFEST
@@ -12,6 +12,8 @@ generators
 chameleon
 chaos
 crypto_pyaes
+ctypes
+ctypes_argtypes
 deepcopy
 deltablue
 django_template
@@ -71,6 +73,7 @@ xml_etree
 #apps
 #math
 #template
+#extension
 
 [group default]
 
diff --git a/pyperformance/data-files/benchmarks/bm_ctypes/bm_ctypes_argtypes.toml b/pyperformance/data-files/benchmarks/bm_ctypes/bm_ctypes_argtypes.toml
new file mode 100644
index 00000000..c04addbe
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_ctypes/bm_ctypes_argtypes.toml
@@ -0,0 +1,11 @@
+[project]
+name = "pyperformance_bm_ctypes_argtypes"
+requires-python = ">=3.7"
+dependencies = ["pyperf"]
+urls = {repository = "https://github.com/python/pyperformance"}
+dynamic = ["version"]
+
+[tool.pyperformance]
+name = "ctypes_argtypes"
+tags = "extension"
+extra_opts = ["--argtypes"]
diff --git a/pyperformance/data-files/benchmarks/bm_ctypes/cmodule.c b/pyperformance/data-files/benchmarks/bm_ctypes/cmodule.c
new file mode 100644
index 00000000..79cc83b8
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_ctypes/cmodule.c
@@ -0,0 +1,54 @@
+#include <Python.h>
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+#define EXPORTED_SYMBOL __declspec(dllexport)
+#else
+#define EXPORTED_SYMBOL
+#endif
+
+
+EXPORTED_SYMBOL
+void void_foo_void(void) {
+
+}
+
+EXPORTED_SYMBOL
+int int_foo_int(int a) {
+    return a + 1;
+}
+
+EXPORTED_SYMBOL
+void void_foo_int(int a) {
+
+}
+
+EXPORTED_SYMBOL
+void void_foo_int_int(int a, int b) {
+
+}
+
+EXPORTED_SYMBOL
+void void_foo_int_int_int(int a, int b, int c) {
+
+}
+
+EXPORTED_SYMBOL
+void void_foo_int_int_int_int(int a, int b, int c, int d) {
+
+}
+
+EXPORTED_SYMBOL
+void void_foo_constchar(const char* str) {
+
+}
+
+PyMODINIT_FUNC
+PyInit_cmodule(void) {
+    // DELIBERATELY EMPTY
+
+    // This isn't actually a Python extension module (it's used via ctypes), so
+    // this entry point function will never be called. However, we are utilizing
+    // setuptools to build it, and on Windows, setuptools explicitly passes the
+    // flag /EXPORT:PyInit_cmodule, so it must be defined.
+    return NULL;
+}
diff --git a/pyperformance/data-files/benchmarks/bm_ctypes/pyproject.toml b/pyperformance/data-files/benchmarks/bm_ctypes/pyproject.toml
new file mode 100644
index 00000000..da953d2b
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_ctypes/pyproject.toml
@@ -0,0 +1,11 @@
+[project]
+name = "pyperformance_bm_ctypes"
+requires-python = ">=3.7"
+dependencies = ["pyperf", "setuptools"]
+urls = {repository = "https://github.com/python/pyperformance"}
+dynamic = ["version"]
+
+[tool.pyperformance]
+name = "ctypes"
+tags = "extension"
+install_setup = true
diff --git a/pyperformance/data-files/benchmarks/bm_ctypes/requirements.txt b/pyperformance/data-files/benchmarks/bm_ctypes/requirements.txt
new file mode 100644
index 00000000..03bd4e19
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_ctypes/requirements.txt
@@ -0,0 +1 @@
+setuptools==62.4.0
diff --git a/pyperformance/data-files/benchmarks/bm_ctypes/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_ctypes/run_benchmark.py
new file mode 100644
index 00000000..9a0fd414
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_ctypes/run_benchmark.py
@@ -0,0 +1,95 @@
+"""
+Test the function call overhead of ctypes.
+"""
+import pyperf
+
+
+import ctypes
+import importlib.util
+
+
+spec = importlib.util.find_spec("bm_ctypes.cmodule")
+if spec is None:
+    raise ImportError("Can't find bm_ctypes.cmodule shared object file")
+ext = ctypes.cdll.LoadLibrary(spec.origin)
+
+
+def benchmark_argtypes(loops):
+    void_foo_void = ext.void_foo_void
+    void_foo_void.argtypes = []
+    void_foo_void.restype = None
+
+    int_foo_int = ext.int_foo_int
+    int_foo_int.argtypes = [ctypes.c_int]
+    int_foo_int.restype = ctypes.c_int
+
+    void_foo_int = ext.void_foo_int
+    void_foo_int.argtypes = [ctypes.c_int]
+    void_foo_int.restype = None
+
+    void_foo_int_int = ext.void_foo_int_int
+    void_foo_int_int.argtypes = [ctypes.c_int, ctypes.c_int]
+    void_foo_int_int.restype = None
+
+    void_foo_int_int_int = ext.void_foo_int_int_int
+    void_foo_int_int_int.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
+    void_foo_int_int_int.restype = None
+
+    void_foo_int_int_int_int = ext.void_foo_int_int_int_int
+    void_foo_int_int_int_int.argtypes = [
+        ctypes.c_int,
+        ctypes.c_int,
+        ctypes.c_int,
+        ctypes.c_int,
+    ]
+    void_foo_int_int_int_int.restype = None
+
+    void_foo_constchar = ext.void_foo_constchar
+    void_foo_constchar.argtypes = [ctypes.c_char_p]
+    void_foo_constchar.restype = None
+
+    return benchmark(loops)
+
+
+def benchmark(loops):
+    void_foo_void = ext.void_foo_void
+    int_foo_int = ext.int_foo_int
+    void_foo_int = ext.void_foo_int
+    void_foo_int_int = ext.void_foo_int_int
+    void_foo_int_int_int = ext.void_foo_int_int_int
+    void_foo_int_int_int_int = ext.void_foo_int_int_int_int
+    void_foo_constchar = ext.void_foo_constchar
+
+    range_it = range(loops)
+
+    # Call the C functions; ctypes infers argument types unless argtypes was set.
+    t0 = pyperf.perf_counter()
+
+    for _ in range_it:
+        void_foo_void()
+        int_foo_int(1)
+        void_foo_int(1)
+        void_foo_int_int(1, 2)
+        void_foo_int_int_int(1, 2, 3)
+        void_foo_int_int_int_int(1, 2, 3, 4)
+        void_foo_constchar(b"bytes")
+
+    return pyperf.perf_counter() - t0
+
+
+def add_cmdline_args(cmd, args):
+    if args.argtypes:
+        cmd.append("--argtypes")
+
+
+if __name__ == "__main__":
+    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
+    runner.metadata["description"] = "ctypes function call overhead benchmark"
+
+    runner.argparser.add_argument("--argtypes", action="store_true")
+    options = runner.parse_args()
+
+    if options.argtypes:
+        runner.bench_time_func("ctypes_argtypes", benchmark_argtypes)
+    else:
+        runner.bench_time_func("ctypes", benchmark)
diff --git a/pyperformance/data-files/benchmarks/bm_ctypes/setup.py b/pyperformance/data-files/benchmarks/bm_ctypes/setup.py
new file mode 100644
index 00000000..2f089ff5
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_ctypes/setup.py
@@ -0,0 +1,11 @@
+from setuptools import setup, Extension
+
+# Compile the C shared object containing functions to call through ctypes. It
+# isn't technically a Python C extension, but this is the easiest way to build
+# it in a cross-platform way.
+
+setup(
+    name="pyperformance_bm_ctypes",
+    ext_modules=[Extension("bm_ctypes.cmodule", sources=["cmodule.c"])],
+    package_dir={"bm_ctypes": "src"},
+)
diff --git a/pyperformance/data-files/benchmarks/bm_ctypes/src/__init__.py b/pyperformance/data-files/benchmarks/bm_ctypes/src/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/pyperformance/venv.py b/pyperformance/venv.py
index 5a703024..074da3c0 100644
--- a/pyperformance/venv.py
+++ b/pyperformance/venv.py
@@ -24,6 +24,10 @@ def from_benchmarks(cls, benchmarks):
         for bench in benchmarks or ():
             filename = bench.requirements_lockfile
             self._add_from_file(filename)
+            if bench.setup_py:
+                # pip doesn't support installing a setup.py,
+                # but it does support installing from the directory it is in.
+                self._add(os.path.dirname(bench.setup_py))
         return self
 
     def __init__(self):

From 91a73e2b9155787c045bd81d011c801c8c54b7b8 Mon Sep 17 00:00:00 2001
From: Michael Droettboom
Date: Wed, 27 Jul 2022 09:34:49 -0400
Subject: [PATCH 2/2] Handle setup.py in _pip.py

---
 pyperformance/_pip.py | 14 ++++++++++----
 pyperformance/venv.py |  2 +-
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/pyperformance/_pip.py b/pyperformance/_pip.py
index 7eb10e04..59283a3b 100644
--- a/pyperformance/_pip.py
+++ b/pyperformance/_pip.py
@@ -149,10 +149,16 @@ def install_requirements(reqs, *extra,
     args = []
     if upgrade:
         args.append('-U')  # --upgrade
-    for reqs in [reqs, *extra]:
-        if os.path.isfile(reqs) and reqs.endswith('.txt'):
-            args.append('-r')  # --requirement
-        args.append(reqs)
+    for req in [reqs, *extra]:
+        if os.path.isfile(req):
+            name = os.path.basename(req)
+            if name == "setup.py":
+                req = os.path.dirname(req)
+            elif name == "requirements.txt":
+                args.append('-r')  # --requirement
+            else:
+                raise ValueError(f"pip doesn't know how to install {req}")
+        args.append(req)
     return run_pip('install', *args, **kwargs)
 
 
diff --git a/pyperformance/venv.py b/pyperformance/venv.py
index 074da3c0..93ae97b2 100644
--- a/pyperformance/venv.py
+++ b/pyperformance/venv.py
@@ -27,7 +27,7 @@ def from_benchmarks(cls, benchmarks):
             if bench.setup_py:
                 # pip doesn't support installing a setup.py,
                 # but it does support installing from the directory it is in.
-                self._add(os.path.dirname(bench.setup_py))
+                self._add(bench.setup_py)
         return self
 
     def __init__(self):
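
A note on what the two benchmarks measure: the minimal sketch below (not part of the patch) reproduces the two ctypes call paths against libc's abs(), so the difference can be seen without building cmodule.c. It assumes a Unix-like platform where ctypes.util.find_library("c") resolves to a loadable C library.

    import ctypes
    import ctypes.util
    import time

    # Load the platform C library. find_library("c") can return None
    # (e.g. on Windows), so this sketch assumes Linux or macOS.
    libc = ctypes.CDLL(ctypes.util.find_library("c"))

    # Indexing a CDLL returns a fresh function pointer on every access,
    # unlike attribute access, which caches it on the library object;
    # this keeps the two call paths below independent of each other.
    inferred_abs = libc["abs"]               # types inferred on every call

    declared_abs = libc["abs"]
    declared_abs.argtypes = [ctypes.c_int]   # types declared once, up front
    declared_abs.restype = ctypes.c_int

    def bench(func, loops=1_000_000):
        # Time `loops` calls through the given ctypes function pointer.
        t0 = time.perf_counter()
        for _ in range(loops):
            func(-42)
        return time.perf_counter() - t0

    print("inferred argtypes:", bench(inferred_abs))
    print("declared argtypes:", bench(declared_abs))

The attribute-access caching is also why benchmark_argtypes() in run_benchmark.py can simply end with "return benchmark(loops)": ext.void_foo_void and friends return the same cached function objects whose argtypes were just declared.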
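
On the install_setup plumbing: the net effect is a plain "pip install <benchmark dir>", not an editable install. venv.py contributes the benchmark's setup.py path, and install_requirements() rewrites it to the containing directory, because pip installs directories rather than setup.py files. A condensed paraphrase of that dispatch (the helper name and paths are for illustration only):

    import os

    def pip_install_args(*targets):
        # Mirrors the dispatch in install_requirements() above: build the
        # argument list that follows `pip install`.
        args = []
        for target in targets:
            if os.path.isfile(target):
                name = os.path.basename(target)
                if name == "setup.py":
                    # pip can't install a setup.py file directly, but it
                    # can install the directory containing it.
                    target = os.path.dirname(target)
                elif name == "requirements.txt":
                    args.append("-r")  # --requirement
                else:
                    raise ValueError(f"pip doesn't know how to install {target}")
            args.append(target)
        return args

Assuming both files exist on disk, pip_install_args("bm_ctypes/requirements.txt", "bm_ctypes/setup.py") returns ["-r", "bm_ctypes/requirements.txt", "bm_ctypes"], i.e. a requirements file plus a directory install.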