From 0f387d994a78e7c38cdc7ae29191a7358561b6e5 Mon Sep 17 00:00:00 2001 From: David Stansby Date: Wed, 17 Jan 2024 10:39:46 +0000 Subject: [PATCH] Remove mentions of Python 2.7 --- asv.conf.json | 8 ++--- asv/commands/common_args.py | 2 +- asv/template/asv.conf.json | 8 ++--- docs/source/asv.conf.json.rst | 24 +++++++------- docs/source/using.rst | 62 +++++++++++++++++------------------ 5 files changed, 52 insertions(+), 52 deletions(-) diff --git a/asv.conf.json b/asv.conf.json index f58c90011..3c7b648ff 100644 --- a/asv.conf.json +++ b/asv.conf.json @@ -43,7 +43,7 @@ // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. - "pythons": ["2.7"], + "pythons": ["3.12"], // The matrix of dependencies to test. Each key is the name of a // package (in PyPI) and the values are version numbers. An empty @@ -86,10 +86,10 @@ // ], // // "include": [ - // // additional env for python2.7 - // {"python": "2.7", "numpy": "1.8"}, + // // additional env for python3.12 + // {"python": "3.12", "numpy": "1.26"}, // // additional env if run on windows+conda - // {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""}, + // {"platform": "win32", "environment_type": "conda", "python": "3.12", "libpython": ""}, // ], // The directory (relative to the current directory) that benchmarks are diff --git a/asv/commands/common_args.py b/asv/commands/common_args.py index 5db2c5c1d..eb88d96a7 100644 --- a/asv/commands/common_args.py +++ b/asv/commands/common_args.py @@ -213,7 +213,7 @@ def __call__(self, parser, namespace, values, option_string=None): def add_environment(parser, default_same=False): help = """Specify the environment and Python versions for running the benchmarks. String of the format 'environment_type:python_version', - for example 'conda:2.7'. If the Python version is not specified, + for example 'conda:3.12'. If the Python version is not specified, all those listed in the configuration file are run. The special environment type 'existing:/path/to/python' runs the benchmarks using the given Python interpreter; if the path is omitted, diff --git a/asv/template/asv.conf.json b/asv/template/asv.conf.json index bbc8dfcf0..a1d305957 100644 --- a/asv/template/asv.conf.json +++ b/asv/template/asv.conf.json @@ -65,7 +65,7 @@ // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. 
- // "pythons": ["2.7", "3.8"], + // "pythons": ["3.8", "3.12"], // The list of conda channel names to be searched for benchmark // dependency packages in the specified order @@ -138,10 +138,10 @@ // ], // // "include": [ - // // additional env for python2.7 - // {"python": "2.7", "req": {"numpy": "1.8"}, "env_nobuild": {"FOO": "123"}}, + // // additional env for python3.12 + // {"python": "3.12", "req": {"numpy": "1.26"}, "env_nobuild": {"FOO": "123"}}, // // additional env if run on windows+conda - // {"platform": "win32", "environment_type": "conda", "python": "2.7", "req": {"libpython": ""}}, + // {"platform": "win32", "environment_type": "conda", "python": "3.12", "req": {"libpython": ""}}, // ], // The directory (relative to the current directory) that benchmarks are diff --git a/docs/source/asv.conf.json.rst b/docs/source/asv.conf.json.rst index 43b5113b4..55d306799 100644 --- a/docs/source/asv.conf.json.rst +++ b/docs/source/asv.conf.json.rst @@ -226,7 +226,7 @@ If provided, it must be a dictionary, containing some of the keys "matrix": { "req": { - "numpy": ["1.7", "1.8"], + "numpy": ["1.25", "1.26"], "Cython": [] "six": ["", null] }, @@ -249,7 +249,7 @@ version and not installed at all:: "matrix": { "req": { - "numpy": ["1.7", "1.8"], + "numpy": ["1.25", "1.26"], "Cython": [] "six": ["", null], } @@ -351,14 +351,14 @@ For example:: "pythons": ["3.8", "3.9"], "matrix": { "req": { - "numpy": ["1.7", "1.8"], + "numpy": ["1.25", "1.26"], "Cython": ["", null], "colorama": ["", null] }, "env": {"FOO": ["1", "2"]}, }, "exclude": [ - {"python": "3.8", "req": {"numpy": "1.7"}}, + {"python": "3.8", "req": {"numpy": "1.25"}}, {"sys_platform": "(?!win32).*", "req": {"colorama": ""}}, {"sys_platform": "win32", "req": {"colorama": null}}, {"env": {"FOO": "1"}}, @@ -368,12 +368,12 @@ This will generate all combinations of Python version and items in the matrix, except those with Python 3.8 and Numpy 3.9. In other words, the combinations:: - python==3.8 numpy==1.8 Cython==latest (colorama==latest) FOO=2 - python==3.8 numpy==1.8 (colorama==latest) FOO=2 - python==3.9 numpy==1.7 Cython==latest (colorama==latest) FOO=2 - python==3.9 numpy==1.7 (colorama==latest) FOO=2 - python==3.9 numpy==1.8 Cython==latest (colorama==latest) FOO=2 - python==3.9 numpy==1.8 (colorama==latest) FOO=2 + python==3.8 numpy==1.26 Cython==latest (colorama==latest) FOO=2 + python==3.8 numpy==1.26 (colorama==latest) FOO=2 + python==3.9 numpy==1.25 Cython==latest (colorama==latest) FOO=2 + python==3.9 numpy==1.25 (colorama==latest) FOO=2 + python==3.9 numpy==1.26 Cython==latest (colorama==latest) FOO=2 + python==3.9 numpy==1.26 (colorama==latest) FOO=2 The ``colorama`` package will be installed only if the current platform is Windows. @@ -402,9 +402,9 @@ The exclude rules are not applied to includes. For example:: "include": [ - {"python": "3.9", "req": {"numpy": "1.8.2"}, "env": {"FOO": "true"}}, + {"python": "3.9", "req": {"numpy": "1.26"}, "env": {"FOO": "true"}}, {"platform": "win32", "environment_type": "conda", - "req": {"python": "2.7", "libpython": ""}} + "req": {"python": "3.12", "libpython": ""}} ] This corresponds to two additional environments. One runs on Python 3.9 diff --git a/docs/source/using.rst b/docs/source/using.rst index faf445776..ce244ba69 100644 --- a/docs/source/using.rst +++ b/docs/source/using.rst @@ -190,10 +190,10 @@ for you, but it expects to find the Python versions specified in the ``asv.conf.json`` file available on the ``PATH``. 
diff --git a/docs/source/using.rst b/docs/source/using.rst
index faf445776..ce244ba69 100644
--- a/docs/source/using.rst
+++ b/docs/source/using.rst
@@ -190,10 +190,10 @@ for you, but it expects to find the Python versions specified in the
 ``asv.conf.json`` file available on the ``PATH``.

 For example, if the ``asv.conf.json`` file has::

-    "pythons": ["2.7", "3.6"]
+    "pythons": ["3.7", "3.12"]

-then it will use the executables named ``python2.7`` and
-``python3.6`` on the path. There are many ways to get multiple
+then it will use the executables named ``python3.7`` and
+``python3.12`` on the path. There are many ways to get multiple
 versions of Python installed -- your package manager, ``apt-get``,
 ``yum``, ``MacPorts`` or ``homebrew`` probably has them, or you can
 also use `pyenv <https://github.com/pyenv/pyenv>`__.
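One way to get several interpreters onto the ``PATH`` side by side is pyenv;
a hedged sketch, where the exact patch-level versions are placeholders::

    $ pyenv install 3.7.17
    $ pyenv install 3.12.1
    $ pyenv global 3.12.1 3.7.17   # exposes python3.12 and python3.7 shims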
@@ -215,21 +215,21 @@ Finally, the benchmarks are run::

        · Fetching recent changes
        · Creating environments......
        · Discovering benchmarks
-       ·· Uninstalling from virtualenv-py2.7
-       ·· Building 4238c44d <main> for virtualenv-py2.7
-       ·· Installing into virtualenv-py2.7.
+       ·· Uninstalling from virtualenv-py3.7
+       ·· Building 4238c44d <main> for virtualenv-py3.7
+       ·· Installing into virtualenv-py3.7.
        · Running 10 total benchmarks (1 commits * 2 environments * 5 benchmarks)
        [  0.00%] · For project commit 4238c44d <main>:
-       [  0.00%] ·· Building for virtualenv-py2.7.
-       [  0.00%] ·· Benchmarking virtualenv-py2.7
+       [  0.00%] ·· Building for virtualenv-py3.7.
+       [  0.00%] ·· Benchmarking virtualenv-py3.7
        [ 10.00%] ··· Running (benchmarks.TimeSuite.time_iterkeys--)....
        [ 30.00%] ··· benchmarks.MemSuite.mem_list                        2.42k
        [ 35.00%] ··· benchmarks.TimeSuite.time_iterkeys            11.1±0.01μs
        [ 40.00%] ··· benchmarks.TimeSuite.time_keys                11.2±0.01μs
        [ 45.00%] ··· benchmarks.TimeSuite.time_range               32.9±0.01μs
        [ 50.00%] ··· benchmarks.TimeSuite.time_xrange              30.3±0.01μs
-       [ 50.00%] ·· Building for virtualenv-py3.6..
-       [ 50.00%] ·· Benchmarking virtualenv-py3.6
+       [ 50.00%] ·· Building for virtualenv-py3.12..
+       [ 50.00%] ·· Benchmarking virtualenv-py3.12
        [ 60.00%] ··· Running (benchmarks.TimeSuite.time_iterkeys--)....
        [ 80.00%] ··· benchmarks.MemSuite.mem_list                        2.11k
        [ 85.00%] ··· benchmarks.TimeSuite.time_iterkeys                 failed
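When iterating on one suite it is often quicker to run a subset of the
benchmarks rather than everything above; a sketch using the ``--bench``
option of ``asv run``, where the regular expression is illustrative::

    $ asv run --bench 'TimeSuite.time_k.*'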
@@ -337,11 +337,11 @@ results from previous runs on the command line::

     $ asv show main
     Commit: 4238c44d <main>

-    benchmarks.MemSuite.mem_list [mymachine/virtualenv-py2.7]
+    benchmarks.MemSuite.mem_list [mymachine/virtualenv-py3.7]
         2.42k
         started: 2018-08-19 18:46:47, duration: 1.00s

-    benchmarks.TimeSuite.time_iterkeys [mymachine/virtualenv-py2.7]
+    benchmarks.TimeSuite.time_iterkeys [mymachine/virtualenv-py3.7]
         11.1±0.06μs
         started: 2018-08-19 18:46:47, duration: 1.00s
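If no commit is named, ``asv show`` should instead list the commits that have
saved results; a rough sketch of the expected output (abridged, machine and
environment names taken from the example above)::

    $ asv show
    Commits with results:

    Machine    : mymachine
    Environment: virtualenv-py3.7

        4238c44d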
@@ -410,9 +410,9 @@ The ``asv rm`` command will prompt before performing any operations.
 Passing the ``-y`` option will skip the prompt.

 Here is a more complex example, to remove all of the benchmarks on
-Python 2.7 and the machine named ``giraffe``::
+Python 3.7 and the machine named ``giraffe``::

-    asv rm python=2.7 machine=giraffe
+    asv rm python=3.7 machine=giraffe


 Finding a commit that produces a large regression
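The section beginning here covers ``asv find``, which searches a commit range
for the largest step change in a single benchmark; a usage sketch, where the
commit range and benchmark name are placeholders::

    $ asv find 4238c44d..main benchmarks.TimeSuite.time_keys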
@@ -504,9 +504,9 @@ simple table summary of profiling results is displayed::

          ncalls  tottime  percall  cumtime  percall filename:lineno(function)
               1    0.000    0.000    6.844    6.844 asv/benchmark.py:171(method_caller)
               1    0.000    0.000    6.844    6.844 asv/benchmark.py:197(run)
-              1    0.000    0.000    6.844    6.844 /usr/lib64/python2.7/timeit.py:201(repeat)
-              3    0.000    0.000    6.844    2.281 /usr/lib64/python2.7/timeit.py:178(timeit)
-              3    0.104    0.035    6.844    2.281 /usr/lib64/python2.7/timeit.py:96(inner)
+              1    0.000    0.000    6.844    6.844 /usr/lib64/python3.7/timeit.py:201(repeat)
+              3    0.000    0.000    6.844    2.281 /usr/lib64/python3.7/timeit.py:178(timeit)
+              3    0.104    0.035    6.844    2.281 /usr/lib64/python3.7/timeit.py:96(inner)
          300000    0.398    0.000    6.740    0.000 benchmarks/time_units.py:20(time_very_simple_unit_parse)
          300000    1.550    0.000    6.342    0.000 astropy/units/core.py:1673(__call__)
          300000    0.495    0.000    2.416    0.000 astropy/units/format/generic.py:361(parse)

         3000002    0.735    0.000    0.735    0.000 {isinstance}
          300000    0.403    0.000    0.403    0.000 {method 'decode' of 'str' objects}
          300000    0.216    0.000    0.216    0.000 astropy/units/format/generic.py:32(__init__)
-         300000    0.152    0.000    0.188    0.000 /usr/lib64/python2.7/inspect.py:59(isclass)
+         300000    0.152    0.000    0.188    0.000 /usr/lib64/python3.7/inspect.py:59(isclass)
-         900000    0.170    0.000    0.170    0.000 {method 'lower' of 'unicode' objects}
-         300000    0.133    0.000    0.133    0.000 {method 'count' of 'unicode' objects}
+         900000    0.170    0.000    0.170    0.000 {method 'lower' of 'str' objects}
+         300000    0.133    0.000    0.133    0.000 {method 'count' of 'str' objects}
          300000    0.078    0.000    0.078    0.000 astropy/units/core.py:272(get_current_unit_registry)

          300000    0.038    0.000    0.038    0.000 {method 'strip' of 'str' objects}
          300003    0.037    0.000    0.037    0.000 {globals}
          300000    0.033    0.000    0.033    0.000 {len}
-              3    0.000    0.000    0.000    0.000 /usr/lib64/python2.7/timeit.py:143(setup)
-              1    0.000    0.000    0.000    0.000 /usr/lib64/python2.7/timeit.py:121(__init__)
+              3    0.000    0.000    0.000    0.000 /usr/lib64/python3.7/timeit.py:143(setup)
+              1    0.000    0.000    0.000    0.000 /usr/lib64/python3.7/timeit.py:121(__init__)
               6    0.000    0.000    0.000    0.000 {time.time}
               1    0.000    0.000    0.000    0.000 {min}
               1    0.000    0.000    0.000    0.000 {range}
               1    0.000    0.000    0.000    0.000 {hasattr}
-              1    0.000    0.000    0.000    0.000 /usr/lib64/python2.7/timeit.py:94(_template_func)
+              1    0.000    0.000    0.000    0.000 /usr/lib64/python3.7/timeit.py:94(_template_func)
               3    0.000    0.000    0.000    0.000 {gc.enable}
               3    0.000    0.000    0.000    0.000 {method 'append' of 'list' objects}
               3    0.000    0.000    0.000    0.000 {gc.disable}
@@ -590,16 +590,16 @@ revisions of the project.
You can do so with the ``compare`` command:: before after ratio [3bfda9c6] [bf719488] - 40.4m 40.4m 1.00 benchmarks.MemSuite.mem_list [amulet.localdomain/virtualenv-py2.7-numpy] - failed 35.2m n/a benchmarks.MemSuite.mem_list [amulet.localdomain/virtualenv-py3.6-numpy] - 11.5±0.08μs 11.0±0μs 0.96 benchmarks.TimeSuite.time_iterkeys [amulet.localdomain/virtualenv-py2.7-numpy] - failed failed n/a benchmarks.TimeSuite.time_iterkeys [amulet.localdomain/virtualenv-py3.6-numpy] - 11.5±1μs 11.2±0.02μs 0.97 benchmarks.TimeSuite.time_keys [amulet.localdomain/virtualenv-py2.7-numpy] - failed 8.40±0.02μs n/a benchmarks.TimeSuite.time_keys [amulet.localdomain/virtualenv-py3.6-numpy] - 34.6±0.09μs 32.9±0.01μs 0.95 benchmarks.TimeSuite.time_range [amulet.localdomain/virtualenv-py2.7-numpy] - failed 35.6±0.05μs n/a benchmarks.TimeSuite.time_range [amulet.localdomain/virtualenv-py3.6-numpy] - 31.6±0.1μs 30.2±0.02μs 0.95 benchmarks.TimeSuite.time_xrange [amulet.localdomain/virtualenv-py2.7-numpy] - failed failed n/a benchmarks.TimeSuite.time_xrange [amulet.localdomain/virtualenv-py3.6-numpy] + 40.4m 40.4m 1.00 benchmarks.MemSuite.mem_list [amulet.localdomain/virtualenv-py3.7-numpy] + failed 35.2m n/a benchmarks.MemSuite.mem_list [amulet.localdomain/virtualenv-py3.12-numpy] + 11.5±0.08μs 11.0±0μs 0.96 benchmarks.TimeSuite.time_iterkeys [amulet.localdomain/virtualenv-py3.7-numpy] + failed failed n/a benchmarks.TimeSuite.time_iterkeys [amulet.localdomain/virtualenv-py3.12-numpy] + 11.5±1μs 11.2±0.02μs 0.97 benchmarks.TimeSuite.time_keys [amulet.localdomain/virtualenv-py3.7-numpy] + failed 8.40±0.02μs n/a benchmarks.TimeSuite.time_keys [amulet.localdomain/virtualenv-py3.12-numpy] + 34.6±0.09μs 32.9±0.01μs 0.95 benchmarks.TimeSuite.time_range [amulet.localdomain/virtualenv-py3.7-numpy] + failed 35.6±0.05μs n/a benchmarks.TimeSuite.time_range [amulet.localdomain/virtualenv-py3.12-numpy] + 31.6±0.1μs 30.2±0.02μs 0.95 benchmarks.TimeSuite.time_xrange [amulet.localdomain/virtualenv-py3.7-numpy] + failed failed n/a benchmarks.TimeSuite.time_xrange [amulet.localdomain/virtualenv-py3.12-numpy] This will show the times for each benchmark for the first and second revision, and the ratio of the second to the first. In addition, the